Posted to commits@hive.apache.org by se...@apache.org on 2018/07/13 01:40:03 UTC

[01/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Repository: hive
Updated Branches:
  refs/heads/master-txnstats 04ea1455f -> 93b9cdd69


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java
new file mode 100644
index 0000000..97b9c1f
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLNotNullConstraint.java
@@ -0,0 +1,1109 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLNotNullConstraint implements org.apache.thrift.TBase<SQLNotNullConstraint, SQLNotNullConstraint._Fields>, java.io.Serializable, Cloneable, Comparable<SQLNotNullConstraint> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLNotNullConstraint");
+
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField NN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("nn_name", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)6);
+  private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7);
+  private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new SQLNotNullConstraintStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new SQLNotNullConstraintTupleSchemeFactory());
+  }
+
+  private String catName; // required
+  private String table_db; // required
+  private String table_name; // required
+  private String column_name; // required
+  private String nn_name; // required
+  private boolean enable_cstr; // required
+  private boolean validate_cstr; // required
+  private boolean rely_cstr; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CAT_NAME((short)1, "catName"),
+    TABLE_DB((short)2, "table_db"),
+    TABLE_NAME((short)3, "table_name"),
+    COLUMN_NAME((short)4, "column_name"),
+    NN_NAME((short)5, "nn_name"),
+    ENABLE_CSTR((short)6, "enable_cstr"),
+    VALIDATE_CSTR((short)7, "validate_cstr"),
+    RELY_CSTR((short)8, "rely_cstr");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CAT_NAME
+          return CAT_NAME;
+        case 2: // TABLE_DB
+          return TABLE_DB;
+        case 3: // TABLE_NAME
+          return TABLE_NAME;
+        case 4: // COLUMN_NAME
+          return COLUMN_NAME;
+        case 5: // NN_NAME
+          return NN_NAME;
+        case 6: // ENABLE_CSTR
+          return ENABLE_CSTR;
+        case 7: // VALIDATE_CSTR
+          return VALIDATE_CSTR;
+        case 8: // RELY_CSTR
+          return RELY_CSTR;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ENABLE_CSTR_ISSET_ID = 0;
+  private static final int __VALIDATE_CSTR_ISSET_ID = 1;
+  private static final int __RELY_CSTR_ISSET_ID = 2;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("column_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.NN_NAME, new org.apache.thrift.meta_data.FieldMetaData("nn_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ENABLE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("enable_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.VALIDATE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("validate_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.RELY_CSTR, new org.apache.thrift.meta_data.FieldMetaData("rely_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLNotNullConstraint.class, metaDataMap);
+  }
+
+  public SQLNotNullConstraint() {
+  }
+
+  public SQLNotNullConstraint(
+    String catName,
+    String table_db,
+    String table_name,
+    String column_name,
+    String nn_name,
+    boolean enable_cstr,
+    boolean validate_cstr,
+    boolean rely_cstr)
+  {
+    this();
+    this.catName = catName;
+    this.table_db = table_db;
+    this.table_name = table_name;
+    this.column_name = column_name;
+    this.nn_name = nn_name;
+    this.enable_cstr = enable_cstr;
+    setEnable_cstrIsSet(true);
+    this.validate_cstr = validate_cstr;
+    setValidate_cstrIsSet(true);
+    this.rely_cstr = rely_cstr;
+    setRely_cstrIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public SQLNotNullConstraint(SQLNotNullConstraint other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetTable_db()) {
+      this.table_db = other.table_db;
+    }
+    if (other.isSetTable_name()) {
+      this.table_name = other.table_name;
+    }
+    if (other.isSetColumn_name()) {
+      this.column_name = other.column_name;
+    }
+    if (other.isSetNn_name()) {
+      this.nn_name = other.nn_name;
+    }
+    this.enable_cstr = other.enable_cstr;
+    this.validate_cstr = other.validate_cstr;
+    this.rely_cstr = other.rely_cstr;
+  }
+
+  public SQLNotNullConstraint deepCopy() {
+    return new SQLNotNullConstraint(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catName = null;
+    this.table_db = null;
+    this.table_name = null;
+    this.column_name = null;
+    this.nn_name = null;
+    setEnable_cstrIsSet(false);
+    this.enable_cstr = false;
+    setValidate_cstrIsSet(false);
+    this.validate_cstr = false;
+    setRely_cstrIsSet(false);
+    this.rely_cstr = false;
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getTable_db() {
+    return this.table_db;
+  }
+
+  public void setTable_db(String table_db) {
+    this.table_db = table_db;
+  }
+
+  public void unsetTable_db() {
+    this.table_db = null;
+  }
+
+  /** Returns true if field table_db is set (has been assigned a value) and false otherwise */
+  public boolean isSetTable_db() {
+    return this.table_db != null;
+  }
+
+  public void setTable_dbIsSet(boolean value) {
+    if (!value) {
+      this.table_db = null;
+    }
+  }
+
+  public String getTable_name() {
+    return this.table_name;
+  }
+
+  public void setTable_name(String table_name) {
+    this.table_name = table_name;
+  }
+
+  public void unsetTable_name() {
+    this.table_name = null;
+  }
+
+  /** Returns true if field table_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetTable_name() {
+    return this.table_name != null;
+  }
+
+  public void setTable_nameIsSet(boolean value) {
+    if (!value) {
+      this.table_name = null;
+    }
+  }
+
+  public String getColumn_name() {
+    return this.column_name;
+  }
+
+  public void setColumn_name(String column_name) {
+    this.column_name = column_name;
+  }
+
+  public void unsetColumn_name() {
+    this.column_name = null;
+  }
+
+  /** Returns true if field column_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetColumn_name() {
+    return this.column_name != null;
+  }
+
+  public void setColumn_nameIsSet(boolean value) {
+    if (!value) {
+      this.column_name = null;
+    }
+  }
+
+  public String getNn_name() {
+    return this.nn_name;
+  }
+
+  public void setNn_name(String nn_name) {
+    this.nn_name = nn_name;
+  }
+
+  public void unsetNn_name() {
+    this.nn_name = null;
+  }
+
+  /** Returns true if field nn_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetNn_name() {
+    return this.nn_name != null;
+  }
+
+  public void setNn_nameIsSet(boolean value) {
+    if (!value) {
+      this.nn_name = null;
+    }
+  }
+
+  public boolean isEnable_cstr() {
+    return this.enable_cstr;
+  }
+
+  public void setEnable_cstr(boolean enable_cstr) {
+    this.enable_cstr = enable_cstr;
+    setEnable_cstrIsSet(true);
+  }
+
+  public void unsetEnable_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field enable_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetEnable_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID);
+  }
+
+  public void setEnable_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID, value);
+  }
+
+  public boolean isValidate_cstr() {
+    return this.validate_cstr;
+  }
+
+  public void setValidate_cstr(boolean validate_cstr) {
+    this.validate_cstr = validate_cstr;
+    setValidate_cstrIsSet(true);
+  }
+
+  public void unsetValidate_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field validate_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidate_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID);
+  }
+
+  public void setValidate_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID, value);
+  }
+
+  public boolean isRely_cstr() {
+    return this.rely_cstr;
+  }
+
+  public void setRely_cstr(boolean rely_cstr) {
+    this.rely_cstr = rely_cstr;
+    setRely_cstrIsSet(true);
+  }
+
+  public void unsetRely_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RELY_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field rely_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetRely_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __RELY_CSTR_ISSET_ID);
+  }
+
+  public void setRely_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RELY_CSTR_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case TABLE_DB:
+      if (value == null) {
+        unsetTable_db();
+      } else {
+        setTable_db((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTable_name();
+      } else {
+        setTable_name((String)value);
+      }
+      break;
+
+    case COLUMN_NAME:
+      if (value == null) {
+        unsetColumn_name();
+      } else {
+        setColumn_name((String)value);
+      }
+      break;
+
+    case NN_NAME:
+      if (value == null) {
+        unsetNn_name();
+      } else {
+        setNn_name((String)value);
+      }
+      break;
+
+    case ENABLE_CSTR:
+      if (value == null) {
+        unsetEnable_cstr();
+      } else {
+        setEnable_cstr((Boolean)value);
+      }
+      break;
+
+    case VALIDATE_CSTR:
+      if (value == null) {
+        unsetValidate_cstr();
+      } else {
+        setValidate_cstr((Boolean)value);
+      }
+      break;
+
+    case RELY_CSTR:
+      if (value == null) {
+        unsetRely_cstr();
+      } else {
+        setRely_cstr((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CAT_NAME:
+      return getCatName();
+
+    case TABLE_DB:
+      return getTable_db();
+
+    case TABLE_NAME:
+      return getTable_name();
+
+    case COLUMN_NAME:
+      return getColumn_name();
+
+    case NN_NAME:
+      return getNn_name();
+
+    case ENABLE_CSTR:
+      return isEnable_cstr();
+
+    case VALIDATE_CSTR:
+      return isValidate_cstr();
+
+    case RELY_CSTR:
+      return isRely_cstr();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CAT_NAME:
+      return isSetCatName();
+    case TABLE_DB:
+      return isSetTable_db();
+    case TABLE_NAME:
+      return isSetTable_name();
+    case COLUMN_NAME:
+      return isSetColumn_name();
+    case NN_NAME:
+      return isSetNn_name();
+    case ENABLE_CSTR:
+      return isSetEnable_cstr();
+    case VALIDATE_CSTR:
+      return isSetValidate_cstr();
+    case RELY_CSTR:
+      return isSetRely_cstr();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof SQLNotNullConstraint)
+      return this.equals((SQLNotNullConstraint)that);
+    return false;
+  }
+
+  public boolean equals(SQLNotNullConstraint that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_table_db = true && this.isSetTable_db();
+    boolean that_present_table_db = true && that.isSetTable_db();
+    if (this_present_table_db || that_present_table_db) {
+      if (!(this_present_table_db && that_present_table_db))
+        return false;
+      if (!this.table_db.equals(that.table_db))
+        return false;
+    }
+
+    boolean this_present_table_name = true && this.isSetTable_name();
+    boolean that_present_table_name = true && that.isSetTable_name();
+    if (this_present_table_name || that_present_table_name) {
+      if (!(this_present_table_name && that_present_table_name))
+        return false;
+      if (!this.table_name.equals(that.table_name))
+        return false;
+    }
+
+    boolean this_present_column_name = true && this.isSetColumn_name();
+    boolean that_present_column_name = true && that.isSetColumn_name();
+    if (this_present_column_name || that_present_column_name) {
+      if (!(this_present_column_name && that_present_column_name))
+        return false;
+      if (!this.column_name.equals(that.column_name))
+        return false;
+    }
+
+    boolean this_present_nn_name = true && this.isSetNn_name();
+    boolean that_present_nn_name = true && that.isSetNn_name();
+    if (this_present_nn_name || that_present_nn_name) {
+      if (!(this_present_nn_name && that_present_nn_name))
+        return false;
+      if (!this.nn_name.equals(that.nn_name))
+        return false;
+    }
+
+    boolean this_present_enable_cstr = true;
+    boolean that_present_enable_cstr = true;
+    if (this_present_enable_cstr || that_present_enable_cstr) {
+      if (!(this_present_enable_cstr && that_present_enable_cstr))
+        return false;
+      if (this.enable_cstr != that.enable_cstr)
+        return false;
+    }
+
+    boolean this_present_validate_cstr = true;
+    boolean that_present_validate_cstr = true;
+    if (this_present_validate_cstr || that_present_validate_cstr) {
+      if (!(this_present_validate_cstr && that_present_validate_cstr))
+        return false;
+      if (this.validate_cstr != that.validate_cstr)
+        return false;
+    }
+
+    boolean this_present_rely_cstr = true;
+    boolean that_present_rely_cstr = true;
+    if (this_present_rely_cstr || that_present_rely_cstr) {
+      if (!(this_present_rely_cstr && that_present_rely_cstr))
+        return false;
+      if (this.rely_cstr != that.rely_cstr)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_table_db = true && (isSetTable_db());
+    list.add(present_table_db);
+    if (present_table_db)
+      list.add(table_db);
+
+    boolean present_table_name = true && (isSetTable_name());
+    list.add(present_table_name);
+    if (present_table_name)
+      list.add(table_name);
+
+    boolean present_column_name = true && (isSetColumn_name());
+    list.add(present_column_name);
+    if (present_column_name)
+      list.add(column_name);
+
+    boolean present_nn_name = true && (isSetNn_name());
+    list.add(present_nn_name);
+    if (present_nn_name)
+      list.add(nn_name);
+
+    boolean present_enable_cstr = true;
+    list.add(present_enable_cstr);
+    if (present_enable_cstr)
+      list.add(enable_cstr);
+
+    boolean present_validate_cstr = true;
+    list.add(present_validate_cstr);
+    if (present_validate_cstr)
+      list.add(validate_cstr);
+
+    boolean present_rely_cstr = true;
+    list.add(present_rely_cstr);
+    if (present_rely_cstr)
+      list.add(rely_cstr);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(SQLNotNullConstraint other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTable_db()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_db, other.table_db);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTable_name()).compareTo(other.isSetTable_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTable_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_name, other.table_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColumn_name()).compareTo(other.isSetColumn_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColumn_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_name, other.column_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNn_name()).compareTo(other.isSetNn_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNn_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nn_name, other.nn_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetEnable_cstr()).compareTo(other.isSetEnable_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEnable_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.enable_cstr, other.enable_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidate_cstr()).compareTo(other.isSetValidate_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidate_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validate_cstr, other.validate_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRely_cstr()).compareTo(other.isSetRely_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRely_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rely_cstr, other.rely_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("SQLNotNullConstraint(");
+    boolean first = true;
+
+    sb.append("catName:");
+    if (this.catName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("table_db:");
+    if (this.table_db == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.table_db);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("table_name:");
+    if (this.table_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.table_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("column_name:");
+    if (this.column_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.column_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("nn_name:");
+    if (this.nn_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.nn_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("enable_cstr:");
+    sb.append(this.enable_cstr);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("validate_cstr:");
+    sb.append(this.validate_cstr);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("rely_cstr:");
+    sb.append(this.rely_cstr);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class SQLNotNullConstraintStandardSchemeFactory implements SchemeFactory {
+    public SQLNotNullConstraintStandardScheme getScheme() {
+      return new SQLNotNullConstraintStandardScheme();
+    }
+  }
+
+  private static class SQLNotNullConstraintStandardScheme extends StandardScheme<SQLNotNullConstraint> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SQLNotNullConstraint struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TABLE_DB
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.table_db = iprot.readString();
+              struct.setTable_dbIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.table_name = iprot.readString();
+              struct.setTable_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // COLUMN_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.column_name = iprot.readString();
+              struct.setColumn_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // NN_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.nn_name = iprot.readString();
+              struct.setNn_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // ENABLE_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.enable_cstr = iprot.readBool();
+              struct.setEnable_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // VALIDATE_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.validate_cstr = iprot.readBool();
+              struct.setValidate_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // RELY_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.rely_cstr = iprot.readBool();
+              struct.setRely_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SQLNotNullConstraint struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catName != null) {
+        oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+        oprot.writeString(struct.catName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.table_db != null) {
+        oprot.writeFieldBegin(TABLE_DB_FIELD_DESC);
+        oprot.writeString(struct.table_db);
+        oprot.writeFieldEnd();
+      }
+      if (struct.table_name != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.table_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.column_name != null) {
+        oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC);
+        oprot.writeString(struct.column_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.nn_name != null) {
+        oprot.writeFieldBegin(NN_NAME_FIELD_DESC);
+        oprot.writeString(struct.nn_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(ENABLE_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.enable_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(VALIDATE_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.validate_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(RELY_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.rely_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class SQLNotNullConstraintTupleSchemeFactory implements SchemeFactory {
+    public SQLNotNullConstraintTupleScheme getScheme() {
+      return new SQLNotNullConstraintTupleScheme();
+    }
+  }
+
+  private static class SQLNotNullConstraintTupleScheme extends TupleScheme<SQLNotNullConstraint> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, SQLNotNullConstraint struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTable_db()) {
+        optionals.set(1);
+      }
+      if (struct.isSetTable_name()) {
+        optionals.set(2);
+      }
+      if (struct.isSetColumn_name()) {
+        optionals.set(3);
+      }
+      if (struct.isSetNn_name()) {
+        optionals.set(4);
+      }
+      if (struct.isSetEnable_cstr()) {
+        optionals.set(5);
+      }
+      if (struct.isSetValidate_cstr()) {
+        optionals.set(6);
+      }
+      if (struct.isSetRely_cstr()) {
+        optionals.set(7);
+      }
+      oprot.writeBitSet(optionals, 8);
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+      if (struct.isSetTable_db()) {
+        oprot.writeString(struct.table_db);
+      }
+      if (struct.isSetTable_name()) {
+        oprot.writeString(struct.table_name);
+      }
+      if (struct.isSetColumn_name()) {
+        oprot.writeString(struct.column_name);
+      }
+      if (struct.isSetNn_name()) {
+        oprot.writeString(struct.nn_name);
+      }
+      if (struct.isSetEnable_cstr()) {
+        oprot.writeBool(struct.enable_cstr);
+      }
+      if (struct.isSetValidate_cstr()) {
+        oprot.writeBool(struct.validate_cstr);
+      }
+      if (struct.isSetRely_cstr()) {
+        oprot.writeBool(struct.rely_cstr);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, SQLNotNullConstraint struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(8);
+      if (incoming.get(0)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.table_db = iprot.readString();
+        struct.setTable_dbIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.table_name = iprot.readString();
+        struct.setTable_nameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.column_name = iprot.readString();
+        struct.setColumn_nameIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.nn_name = iprot.readString();
+        struct.setNn_nameIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.enable_cstr = iprot.readBool();
+        struct.setEnable_cstrIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.validate_cstr = iprot.readBool();
+        struct.setValidate_cstrIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.rely_cstr = iprot.readBool();
+        struct.setRely_cstrIsSet(true);
+      }
+    }
+  }
+
+}
+
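
For context on how the generated bean above behaves at runtime, here is a minimal round-trip sketch, assuming only APIs visible in the file itself (the eight-argument constructor, write()/read(), equals(), TCompactProtocol, TIOStreamTransport); the catalog, table, column, and constraint names are hypothetical sample values:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TIOStreamTransport;

    public class NotNullConstraintRoundTrip {
      public static void main(String[] args) throws Exception {
        // The eight-argument constructor also flips the three boolean isset
        // bits, as the generated constructor does via set*IsSet(true).
        SQLNotNullConstraint nn = new SQLNotNullConstraint(
            "hive", "default", "orders", "order_id", "nn_order_id",
            true,   // enable_cstr
            false,  // validate_cstr
            true);  // rely_cstr

        // Round-trip through TCompactProtocol, the same protocol the
        // generated writeObject/readObject hooks use for Java serialization.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        nn.write(new TCompactProtocol(new TIOStreamTransport(bos)));

        SQLNotNullConstraint copy = new SQLNotNullConstraint();
        copy.read(new TCompactProtocol(new TIOStreamTransport(
            new ByteArrayInputStream(bos.toByteArray()))));

        System.out.println(nn.equals(copy));  // prints: true
      }
    }
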


[30/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
new file mode 100644
index 0000000..0f22168
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java
@@ -0,0 +1,447 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DropPartitionsResult implements org.apache.thrift.TBase<DropPartitionsResult, DropPartitionsResult._Fields>, java.io.Serializable, Cloneable, Comparable<DropPartitionsResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsResult");
+
+  private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DropPartitionsResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DropPartitionsResultTupleSchemeFactory());
+  }
+
+  private List<Partition> partitions; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PARTITIONS((short)1, "partitions");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PARTITIONS
+          return PARTITIONS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.PARTITIONS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropPartitionsResult.class, metaDataMap);
+  }
+
+  public DropPartitionsResult() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DropPartitionsResult(DropPartitionsResult other) {
+    if (other.isSetPartitions()) {
+      List<Partition> __this__partitions = new ArrayList<Partition>(other.partitions.size());
+      for (Partition other_element : other.partitions) {
+        __this__partitions.add(new Partition(other_element));
+      }
+      this.partitions = __this__partitions;
+    }
+  }
+
+  public DropPartitionsResult deepCopy() {
+    return new DropPartitionsResult(this);
+  }
+
+  @Override
+  public void clear() {
+    this.partitions = null;
+  }
+
+  public int getPartitionsSize() {
+    return (this.partitions == null) ? 0 : this.partitions.size();
+  }
+
+  public java.util.Iterator<Partition> getPartitionsIterator() {
+    return (this.partitions == null) ? null : this.partitions.iterator();
+  }
+
+  public void addToPartitions(Partition elem) {
+    if (this.partitions == null) {
+      this.partitions = new ArrayList<Partition>();
+    }
+    this.partitions.add(elem);
+  }
+
+  public List<Partition> getPartitions() {
+    return this.partitions;
+  }
+
+  public void setPartitions(List<Partition> partitions) {
+    this.partitions = partitions;
+  }
+
+  public void unsetPartitions() {
+    this.partitions = null;
+  }
+
+  /** Returns true if field partitions is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitions() {
+    return this.partitions != null;
+  }
+
+  public void setPartitionsIsSet(boolean value) {
+    if (!value) {
+      this.partitions = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PARTITIONS:
+      if (value == null) {
+        unsetPartitions();
+      } else {
+        setPartitions((List<Partition>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PARTITIONS:
+      return getPartitions();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PARTITIONS:
+      return isSetPartitions();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DropPartitionsResult)
+      return this.equals((DropPartitionsResult)that);
+    return false;
+  }
+
+  public boolean equals(DropPartitionsResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_partitions = true && this.isSetPartitions();
+    boolean that_present_partitions = true && that.isSetPartitions();
+    if (this_present_partitions || that_present_partitions) {
+      if (!(this_present_partitions && that_present_partitions))
+        return false;
+      if (!this.partitions.equals(that.partitions))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_partitions = true && (isSetPartitions());
+    list.add(present_partitions);
+    if (present_partitions)
+      list.add(partitions);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DropPartitionsResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitions()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DropPartitionsResult(");
+    boolean first = true;
+
+    if (isSetPartitions()) {
+      sb.append("partitions:");
+      if (this.partitions == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partitions);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DropPartitionsResultStandardSchemeFactory implements SchemeFactory {
+    public DropPartitionsResultStandardScheme getScheme() {
+      return new DropPartitionsResultStandardScheme();
+    }
+  }
+
+  private static class DropPartitionsResultStandardScheme extends StandardScheme<DropPartitionsResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PARTITIONS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list490 = iprot.readListBegin();
+                struct.partitions = new ArrayList<Partition>(_list490.size);
+                Partition _elem491;
+                for (int _i492 = 0; _i492 < _list490.size; ++_i492)
+                {
+                  _elem491 = new Partition();
+                  _elem491.read(iprot);
+                  struct.partitions.add(_elem491);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.partitions != null) {
+        if (struct.isSetPartitions()) {
+          oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
+            for (Partition _iter493 : struct.partitions)
+            {
+              _iter493.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DropPartitionsResultTupleSchemeFactory implements SchemeFactory {
+    public DropPartitionsResultTupleScheme getScheme() {
+      return new DropPartitionsResultTupleScheme();
+    }
+  }
+
+  private static class DropPartitionsResultTupleScheme extends TupleScheme<DropPartitionsResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetPartitions()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetPartitions()) {
+        {
+          oprot.writeI32(struct.partitions.size());
+          for (Partition _iter494 : struct.partitions)
+          {
+            _iter494.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list495 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.partitions = new ArrayList<Partition>(_list495.size);
+          Partition _elem496;
+          for (int _i497 = 0; _i497 < _list495.size; ++_i497)
+          {
+            _elem496 = new Partition();
+            _elem496.read(iprot);
+            struct.partitions.add(_elem496);
+          }
+        }
+        struct.setPartitionsIsSet(true);
+      }
+    }
+  }
+
+}
+
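
Note the optionals[] array and the isSetPartitions() guard in the standard scheme's write() above: partitions is an OPTIONAL field, so an unset list is skipped on the wire entirely, and getPartitionsSize() tolerates the null backing list. A short sketch of those semantics, assuming only the accessors shown above (the empty Partition is a placeholder; its no-arg constructor appears in the read path above):

    import org.apache.hadoop.hive.metastore.api.DropPartitionsResult;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class DropPartitionsResultDemo {
      public static void main(String[] args) {
        DropPartitionsResult result = new DropPartitionsResult();
        System.out.println(result.isSetPartitions());   // false: optional field unset
        System.out.println(result.getPartitionsSize()); // 0, no NullPointerException

        result.addToPartitions(new Partition());        // lazily allocates the list
        System.out.println(result.isSetPartitions());   // true: field is now written
      }
    }
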

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
new file mode 100644
index 0000000..52fae26
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java
@@ -0,0 +1,447 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class EnvironmentContext implements org.apache.thrift.TBase<EnvironmentContext, EnvironmentContext._Fields>, java.io.Serializable, Cloneable, Comparable<EnvironmentContext> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("EnvironmentContext");
+
+  private static final org.apache.thrift.protocol.TField PROPERTIES_FIELD_DESC = new org.apache.thrift.protocol.TField("properties", org.apache.thrift.protocol.TType.MAP, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new EnvironmentContextStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new EnvironmentContextTupleSchemeFactory());
+  }
+
+  private Map<String,String> properties; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PROPERTIES((short)1, "properties");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PROPERTIES
+          return PROPERTIES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PROPERTIES, new org.apache.thrift.meta_data.FieldMetaData("properties", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(EnvironmentContext.class, metaDataMap);
+  }
+
+  public EnvironmentContext() {
+  }
+
+  public EnvironmentContext(
+    Map<String,String> properties)
+  {
+    this();
+    this.properties = properties;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public EnvironmentContext(EnvironmentContext other) {
+    if (other.isSetProperties()) {
+      Map<String,String> __this__properties = new HashMap<String,String>(other.properties);
+      this.properties = __this__properties;
+    }
+  }
+
+  public EnvironmentContext deepCopy() {
+    return new EnvironmentContext(this);
+  }
+
+  @Override
+  public void clear() {
+    this.properties = null;
+  }
+
+  public int getPropertiesSize() {
+    return (this.properties == null) ? 0 : this.properties.size();
+  }
+
+  public void putToProperties(String key, String val) {
+    if (this.properties == null) {
+      this.properties = new HashMap<String,String>();
+    }
+    this.properties.put(key, val);
+  }
+
+  public Map<String,String> getProperties() {
+    return this.properties;
+  }
+
+  public void setProperties(Map<String,String> properties) {
+    this.properties = properties;
+  }
+
+  public void unsetProperties() {
+    this.properties = null;
+  }
+
+  /** Returns true if field properties is set (has been assigned a value) and false otherwise */
+  public boolean isSetProperties() {
+    return this.properties != null;
+  }
+
+  public void setPropertiesIsSet(boolean value) {
+    if (!value) {
+      this.properties = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PROPERTIES:
+      if (value == null) {
+        unsetProperties();
+      } else {
+        setProperties((Map<String,String>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PROPERTIES:
+      return getProperties();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PROPERTIES:
+      return isSetProperties();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof EnvironmentContext)
+      return this.equals((EnvironmentContext)that);
+    return false;
+  }
+
+  public boolean equals(EnvironmentContext that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_properties = true && this.isSetProperties();
+    boolean that_present_properties = true && that.isSetProperties();
+    if (this_present_properties || that_present_properties) {
+      if (!(this_present_properties && that_present_properties))
+        return false;
+      if (!this.properties.equals(that.properties))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_properties = true && (isSetProperties());
+    list.add(present_properties);
+    if (present_properties)
+      list.add(properties);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(EnvironmentContext other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetProperties()).compareTo(other.isSetProperties());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetProperties()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.properties, other.properties);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("EnvironmentContext(");
+    boolean first = true;
+
+    sb.append("properties:");
+    if (this.properties == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.properties);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class EnvironmentContextStandardSchemeFactory implements SchemeFactory {
+    public EnvironmentContextStandardScheme getScheme() {
+      return new EnvironmentContextStandardScheme();
+    }
+  }
+
+  private static class EnvironmentContextStandardScheme extends StandardScheme<EnvironmentContext> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, EnvironmentContext struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PROPERTIES
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map310 = iprot.readMapBegin();
+                struct.properties = new HashMap<String,String>(2*_map310.size);
+                String _key311;
+                String _val312;
+                for (int _i313 = 0; _i313 < _map310.size; ++_i313)
+                {
+                  _key311 = iprot.readString();
+                  _val312 = iprot.readString();
+                  struct.properties.put(_key311, _val312);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setPropertiesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, EnvironmentContext struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.properties != null) {
+        oprot.writeFieldBegin(PROPERTIES_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size()));
+          for (Map.Entry<String, String> _iter314 : struct.properties.entrySet())
+          {
+            oprot.writeString(_iter314.getKey());
+            oprot.writeString(_iter314.getValue());
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class EnvironmentContextTupleSchemeFactory implements SchemeFactory {
+    public EnvironmentContextTupleScheme getScheme() {
+      return new EnvironmentContextTupleScheme();
+    }
+  }
+
+  private static class EnvironmentContextTupleScheme extends TupleScheme<EnvironmentContext> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetProperties()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetProperties()) {
+        {
+          oprot.writeI32(struct.properties.size());
+          for (Map.Entry<String, String> _iter315 : struct.properties.entrySet())
+          {
+            oprot.writeString(_iter315.getKey());
+            oprot.writeString(_iter315.getValue());
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TMap _map316 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.properties = new HashMap<String,String>(2*_map316.size);
+          String _key317;
+          String _val318;
+          for (int _i319 = 0; _i319 < _map316.size; ++_i319)
+          {
+            _key317 = iprot.readString();
+            _val318 = iprot.readString();
+            struct.properties.put(_key317, _val318);
+          }
+        }
+        struct.setPropertiesIsSet(true);
+      }
+    }
+  }
+
+}
+
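
Since EnvironmentContext implements java.io.Serializable and its private writeObject/readObject hooks delegate to TCompactProtocol (see above), plain Java serialization round-trips the Thrift payload. A minimal sketch, assuming libthrift 0.9.3 and the generated class above are on the classpath:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    public class EnvironmentContextRoundTrip {
      public static void main(String[] args) throws Exception {
        EnvironmentContext ctx = new EnvironmentContext();
        ctx.putToProperties("exampleKey", "exampleValue");  // lazily allocates the map

        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
          oos.writeObject(ctx);   // internally serializes via TCompactProtocol
        }
        try (ObjectInputStream ois =
            new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
          EnvironmentContext copy = (EnvironmentContext) ois.readObject();
          System.out.println(copy.getProperties());  // {exampleKey=exampleValue}
        }
      }
    }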

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EventRequestType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EventRequestType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EventRequestType.java
new file mode 100644
index 0000000..4295046
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EventRequestType.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum EventRequestType implements org.apache.thrift.TEnum {
+  INSERT(1),
+  UPDATE(2),
+  DELETE(3);
+
+  private final int value;
+
+  private EventRequestType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static EventRequestType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return INSERT;
+      case 2:
+        return UPDATE;
+      case 3:
+        return DELETE;
+      default:
+        return null;
+    }
+  }
+}
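
Note that findByValue returns null rather than throwing for unknown wire values, so callers decoding integers sent by a newer server should null-check the result. A minimal sketch using only the class above:

    import org.apache.hadoop.hive.metastore.api.EventRequestType;

    public class EventRequestTypeDemo {
      public static void main(String[] args) {
        EventRequestType t = EventRequestType.findByValue(2);
        System.out.println(t + " = " + t.getValue());          // UPDATE = 2
        System.out.println(EventRequestType.findByValue(99));  // null, not an exception
      }
    }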

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
new file mode 100644
index 0000000..8f00fbd
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FieldSchema.java
@@ -0,0 +1,603 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class FieldSchema implements org.apache.thrift.TBase<FieldSchema, FieldSchema._Fields>, java.io.Serializable, Cloneable, Comparable<FieldSchema> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FieldSchema");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField COMMENT_FIELD_DESC = new org.apache.thrift.protocol.TField("comment", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FieldSchemaStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FieldSchemaTupleSchemeFactory());
+  }
+
+  private String name; // required
+  private String type; // required
+  private String comment; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NAME((short)1, "name"),
+    TYPE((short)2, "type"),
+    COMMENT((short)3, "comment");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NAME
+          return NAME;
+        case 2: // TYPE
+          return TYPE;
+        case 3: // COMMENT
+          return COMMENT;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COMMENT, new org.apache.thrift.meta_data.FieldMetaData("comment", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FieldSchema.class, metaDataMap);
+  }
+
+  public FieldSchema() {
+  }
+
+  public FieldSchema(
+    String name,
+    String type,
+    String comment)
+  {
+    this();
+    this.name = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(name);
+    this.type = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(type);
+    this.comment = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(comment);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public FieldSchema(FieldSchema other) {
+    if (other.isSetName()) {
+      this.name = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.name);
+    }
+    if (other.isSetType()) {
+      this.type = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.type);
+    }
+    if (other.isSetComment()) {
+      this.comment = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.comment);
+    }
+  }
+
+  public FieldSchema deepCopy() {
+    return new FieldSchema(this);
+  }
+
+  @Override
+  public void clear() {
+    this.name = null;
+    this.type = null;
+    this.comment = null;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public void setName(String name) {
+    this.name = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(name);
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public String getType() {
+    return this.type;
+  }
+
+  public void setType(String type) {
+    this.type = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(type);
+  }
+
+  public void unsetType() {
+    this.type = null;
+  }
+
+  /** Returns true if field type is set (has been assigned a value) and false otherwise */
+  public boolean isSetType() {
+    return this.type != null;
+  }
+
+  public void setTypeIsSet(boolean value) {
+    if (!value) {
+      this.type = null;
+    }
+  }
+
+  public String getComment() {
+    return this.comment;
+  }
+
+  public void setComment(String comment) {
+    this.comment = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(comment);
+  }
+
+  public void unsetComment() {
+    this.comment = null;
+  }
+
+  /** Returns true if field comment is set (has been assigned a value) and false otherwise */
+  public boolean isSetComment() {
+    return this.comment != null;
+  }
+
+  public void setCommentIsSet(boolean value) {
+    if (!value) {
+      this.comment = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((String)value);
+      }
+      break;
+
+    case TYPE:
+      if (value == null) {
+        unsetType();
+      } else {
+        setType((String)value);
+      }
+      break;
+
+    case COMMENT:
+      if (value == null) {
+        unsetComment();
+      } else {
+        setComment((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NAME:
+      return getName();
+
+    case TYPE:
+      return getType();
+
+    case COMMENT:
+      return getComment();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NAME:
+      return isSetName();
+    case TYPE:
+      return isSetType();
+    case COMMENT:
+      return isSetComment();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof FieldSchema)
+      return this.equals((FieldSchema)that);
+    return false;
+  }
+
+  public boolean equals(FieldSchema that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    boolean this_present_type = true && this.isSetType();
+    boolean that_present_type = true && that.isSetType();
+    if (this_present_type || that_present_type) {
+      if (!(this_present_type && that_present_type))
+        return false;
+      if (!this.type.equals(that.type))
+        return false;
+    }
+
+    boolean this_present_comment = true && this.isSetComment();
+    boolean that_present_comment = true && that.isSetComment();
+    if (this_present_comment || that_present_comment) {
+      if (!(this_present_comment && that_present_comment))
+        return false;
+      if (!this.comment.equals(that.comment))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    boolean present_type = true && (isSetType());
+    list.add(present_type);
+    if (present_type)
+      list.add(type);
+
+    boolean present_comment = true && (isSetComment());
+    list.add(present_comment);
+    if (present_comment)
+      list.add(comment);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(FieldSchema other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetComment()).compareTo(other.isSetComment());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetComment()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.comment, other.comment);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FieldSchema(");
+    boolean first = true;
+
+    sb.append("name:");
+    if (this.name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("type:");
+    if (this.type == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.type);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("comment:");
+    if (this.comment == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.comment);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class FieldSchemaStandardSchemeFactory implements SchemeFactory {
+    public FieldSchemaStandardScheme getScheme() {
+      return new FieldSchemaStandardScheme();
+    }
+  }
+
+  private static class FieldSchemaStandardScheme extends StandardScheme<FieldSchema> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, FieldSchema struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.type = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // COMMENT
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.comment = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setCommentIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, FieldSchema struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        oprot.writeString(struct.name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.type != null) {
+        oprot.writeFieldBegin(TYPE_FIELD_DESC);
+        oprot.writeString(struct.type);
+        oprot.writeFieldEnd();
+      }
+      if (struct.comment != null) {
+        oprot.writeFieldBegin(COMMENT_FIELD_DESC);
+        oprot.writeString(struct.comment);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class FieldSchemaTupleSchemeFactory implements SchemeFactory {
+    public FieldSchemaTupleScheme getScheme() {
+      return new FieldSchemaTupleScheme();
+    }
+  }
+
+  private static class FieldSchemaTupleScheme extends TupleScheme<FieldSchema> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, FieldSchema struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetType()) {
+        optionals.set(1);
+      }
+      if (struct.isSetComment()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+      if (struct.isSetType()) {
+        oprot.writeString(struct.type);
+      }
+      if (struct.isSetComment()) {
+        oprot.writeString(struct.comment);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, FieldSchema struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.name = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+        struct.setNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.type = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+        struct.setTypeIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.comment = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+        struct.setCommentIsSet(true);
+      }
+    }
+  }
+
+}
+
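
Unlike most generated structs, FieldSchema routes every string through StringUtils.intern (see the constructor and setters above), so the many duplicate column names and type strings held by a metastore can share storage. A minimal usage sketch, assuming the class above is on the classpath:

    import org.apache.hadoop.hive.metastore.api.FieldSchema;

    public class FieldSchemaDemo {
      public static void main(String[] args) {
        FieldSchema a = new FieldSchema("id", "bigint", "row id");
        FieldSchema b = new FieldSchema("id", "bigint", "row id");
        System.out.println(a.equals(b));     // true: field-by-field value equality
        System.out.println(a.compareTo(b));  // 0: ordered by name, then type, then comment
      }
    }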

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FileMetadataExprType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FileMetadataExprType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FileMetadataExprType.java
new file mode 100644
index 0000000..4e393e2
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FileMetadataExprType.java
@@ -0,0 +1,42 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum FileMetadataExprType implements org.apache.thrift.TEnum {
+  ORC_SARG(1);
+
+  private final int value;
+
+  private FileMetadataExprType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static FileMetadataExprType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return ORC_SARG;
+      default:
+        return null;
+    }
+  }
+}

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
new file mode 100644
index 0000000..8f5b4e5
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsResp.java
@@ -0,0 +1,449 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class FindSchemasByColsResp implements org.apache.thrift.TBase<FindSchemasByColsResp, FindSchemasByColsResp._Fields>, java.io.Serializable, Cloneable, Comparable<FindSchemasByColsResp> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FindSchemasByColsResp");
+
+  private static final org.apache.thrift.protocol.TField SCHEMA_VERSIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaVersions", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FindSchemasByColsRespStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FindSchemasByColsRespTupleSchemeFactory());
+  }
+
+  private List<SchemaVersionDescriptor> schemaVersions; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SCHEMA_VERSIONS((short)1, "schemaVersions");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SCHEMA_VERSIONS
+          return SCHEMA_VERSIONS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SCHEMA_VERSIONS, new org.apache.thrift.meta_data.FieldMetaData("schemaVersions", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SchemaVersionDescriptor.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FindSchemasByColsResp.class, metaDataMap);
+  }
+
+  public FindSchemasByColsResp() {
+  }
+
+  public FindSchemasByColsResp(
+    List<SchemaVersionDescriptor> schemaVersions)
+  {
+    this();
+    this.schemaVersions = schemaVersions;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public FindSchemasByColsResp(FindSchemasByColsResp other) {
+    if (other.isSetSchemaVersions()) {
+      List<SchemaVersionDescriptor> __this__schemaVersions = new ArrayList<SchemaVersionDescriptor>(other.schemaVersions.size());
+      for (SchemaVersionDescriptor other_element : other.schemaVersions) {
+        __this__schemaVersions.add(new SchemaVersionDescriptor(other_element));
+      }
+      this.schemaVersions = __this__schemaVersions;
+    }
+  }
+
+  public FindSchemasByColsResp deepCopy() {
+    return new FindSchemasByColsResp(this);
+  }
+
+  @Override
+  public void clear() {
+    this.schemaVersions = null;
+  }
+
+  public int getSchemaVersionsSize() {
+    return (this.schemaVersions == null) ? 0 : this.schemaVersions.size();
+  }
+
+  public java.util.Iterator<SchemaVersionDescriptor> getSchemaVersionsIterator() {
+    return (this.schemaVersions == null) ? null : this.schemaVersions.iterator();
+  }
+
+  public void addToSchemaVersions(SchemaVersionDescriptor elem) {
+    if (this.schemaVersions == null) {
+      this.schemaVersions = new ArrayList<SchemaVersionDescriptor>();
+    }
+    this.schemaVersions.add(elem);
+  }
+
+  public List<SchemaVersionDescriptor> getSchemaVersions() {
+    return this.schemaVersions;
+  }
+
+  public void setSchemaVersions(List<SchemaVersionDescriptor> schemaVersions) {
+    this.schemaVersions = schemaVersions;
+  }
+
+  public void unsetSchemaVersions() {
+    this.schemaVersions = null;
+  }
+
+  /** Returns true if field schemaVersions is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaVersions() {
+    return this.schemaVersions != null;
+  }
+
+  public void setSchemaVersionsIsSet(boolean value) {
+    if (!value) {
+      this.schemaVersions = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SCHEMA_VERSIONS:
+      if (value == null) {
+        unsetSchemaVersions();
+      } else {
+        setSchemaVersions((List<SchemaVersionDescriptor>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SCHEMA_VERSIONS:
+      return getSchemaVersions();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SCHEMA_VERSIONS:
+      return isSetSchemaVersions();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof FindSchemasByColsResp)
+      return this.equals((FindSchemasByColsResp)that);
+    return false;
+  }
+
+  public boolean equals(FindSchemasByColsResp that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_schemaVersions = true && this.isSetSchemaVersions();
+    boolean that_present_schemaVersions = true && that.isSetSchemaVersions();
+    if (this_present_schemaVersions || that_present_schemaVersions) {
+      if (!(this_present_schemaVersions && that_present_schemaVersions))
+        return false;
+      if (!this.schemaVersions.equals(that.schemaVersions))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_schemaVersions = true && (isSetSchemaVersions());
+    list.add(present_schemaVersions);
+    if (present_schemaVersions)
+      list.add(schemaVersions);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(FindSchemasByColsResp other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSchemaVersions()).compareTo(other.isSetSchemaVersions());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaVersions()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaVersions, other.schemaVersions);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FindSchemasByColsResp(");
+    boolean first = true;
+
+    sb.append("schemaVersions:");
+    if (this.schemaVersions == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.schemaVersions);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class FindSchemasByColsRespStandardSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRespStandardScheme getScheme() {
+      return new FindSchemasByColsRespStandardScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRespStandardScheme extends StandardScheme<FindSchemasByColsResp> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsResp struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SCHEMA_VERSIONS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list944 = iprot.readListBegin();
+                struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list944.size);
+                SchemaVersionDescriptor _elem945;
+                for (int _i946 = 0; _i946 < _list944.size; ++_i946)
+                {
+                  _elem945 = new SchemaVersionDescriptor();
+                  _elem945.read(iprot);
+                  struct.schemaVersions.add(_elem945);
+                }
+                iprot.readListEnd();
+              }
+              struct.setSchemaVersionsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsResp struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.schemaVersions != null) {
+        oprot.writeFieldBegin(SCHEMA_VERSIONS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.schemaVersions.size()));
+          for (SchemaVersionDescriptor _iter947 : struct.schemaVersions)
+          {
+            _iter947.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class FindSchemasByColsRespTupleSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRespTupleScheme getScheme() {
+      return new FindSchemasByColsRespTupleScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRespTupleScheme extends TupleScheme<FindSchemasByColsResp> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsResp struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSchemaVersions()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetSchemaVersions()) {
+        {
+          oprot.writeI32(struct.schemaVersions.size());
+          for (SchemaVersionDescriptor _iter948 : struct.schemaVersions)
+          {
+            _iter948.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsResp struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list949 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.schemaVersions = new ArrayList<SchemaVersionDescriptor>(_list949.size);
+          SchemaVersionDescriptor _elem950;
+          for (int _i951 = 0; _i951 < _list949.size; ++_i951)
+          {
+            _elem950 = new SchemaVersionDescriptor();
+            _elem950.read(iprot);
+            struct.schemaVersions.add(_elem950);
+          }
+        }
+        struct.setSchemaVersionsIsSet(true);
+      }
+    }
+  }
+
+}
+
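
The addToSchemaVersions helper lazily allocates the backing list, and getSchemaVersionsSize tolerates the null state, so a server can build the response incrementally. A minimal sketch (SchemaVersionDescriptor is another generated struct from this commit; its no-arg constructor appears in the read scheme above):

    import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
    import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;

    public class FindSchemasByColsRespDemo {
      public static void main(String[] args) {
        FindSchemasByColsResp resp = new FindSchemasByColsResp();
        System.out.println(resp.getSchemaVersionsSize());  // 0 while the list is still null
        resp.addToSchemaVersions(new SchemaVersionDescriptor());
        System.out.println(resp.getSchemaVersionsSize());  // 1
      }
    }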


[73/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
index 0000000,8d88749..a46d2f9
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java
@@@ -1,0 -1,9354 +1,9422 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.commons.lang.StringUtils.join;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_COMMENT;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.Warehouse.getCatalogQualifiedTableName;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.parseDbName;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.CAT_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.DB_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependNotNullCatToDbName;
+ 
+ import java.io.IOException;
+ import java.net.InetAddress;
+ import java.net.UnknownHostException;
+ import java.nio.ByteBuffer;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.AbstractMap;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.LinkedHashMap;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.Objects;
+ import java.util.Properties;
+ import java.util.Set;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.Future;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.concurrent.atomic.AtomicInteger;
+ import java.util.concurrent.locks.Condition;
+ import java.util.concurrent.locks.Lock;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.regex.Pattern;
+ 
+ import javax.jdo.JDOException;
+ 
+ import com.codahale.metrics.Counter;
+ import com.google.common.collect.ImmutableList;
+ import com.google.common.collect.ImmutableListMultimap;
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Multimaps;
+ 
+ import org.apache.commons.cli.OptionBuilder;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileStatus;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.events.AddForeignKeyEvent;
+ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.StatsUpdateMode;
+ import org.apache.hadoop.hive.metastore.events.AbortTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.AddNotNullConstraintEvent;
+ import org.apache.hadoop.hive.metastore.events.AddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AddPrimaryKeyEvent;
+ import org.apache.hadoop.hive.metastore.events.AddUniqueConstraintEvent;
+ import org.apache.hadoop.hive.metastore.events.AllocWriteIdEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.CommitTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateFunctionEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.AddSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.CreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.DropCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.DropConstraintEvent;
+ import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.DropFunctionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.DropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.DropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.InsertEvent;
+ import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.events.OpenTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreAddSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.events.PreDropTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreEventContext;
+ import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadCatalogEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadISchemaEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadTableEvent;
+ import org.apache.hadoop.hive.metastore.events.PreReadhSchemaVersionEvent;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage.EventType;
+ import org.apache.hadoop.hive.metastore.metrics.JvmPauseMonitor;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.metrics.PerfLogger;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.apache.hadoop.hive.metastore.security.MetastoreDelegationTokenManager;
+ import org.apache.hadoop.hive.metastore.security.TUGIContainingTransport;
+ import org.apache.hadoop.hive.metastore.txn.TxnStore;
+ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.security.SecurityUtil;
+ import org.apache.hadoop.hive.metastore.utils.CommonCliOptions;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.HdfsUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.LogUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetastoreVersionInfo;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ReflectionUtils;
+ import org.apache.hadoop.util.ShutdownHookManager;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.logging.log4j.LogManager;
+ import org.apache.logging.log4j.core.LoggerContext;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.TProcessor;
+ import org.apache.thrift.protocol.TBinaryProtocol;
+ import org.apache.thrift.protocol.TCompactProtocol;
+ import org.apache.thrift.protocol.TProtocol;
+ import org.apache.thrift.protocol.TProtocolFactory;
+ import org.apache.thrift.server.ServerContext;
+ import org.apache.thrift.server.TServer;
+ import org.apache.thrift.server.TServerEventHandler;
+ import org.apache.thrift.server.TThreadPoolServer;
+ import org.apache.thrift.transport.TFramedTransport;
+ import org.apache.thrift.transport.TServerSocket;
+ import org.apache.thrift.transport.TTransport;
+ import org.apache.thrift.transport.TTransportFactory;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.facebook.fb303.FacebookBase;
+ import com.facebook.fb303.fb_status;
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.base.Preconditions;
+ import com.google.common.base.Splitter;
+ import com.google.common.util.concurrent.ThreadFactoryBuilder;
+ 
+ /**
+  * TODO:pc remove application logic to a separate interface.
+  */
+ public class HiveMetaStore extends ThriftHiveMetastore {
+   public static final Logger LOG = LoggerFactory.getLogger(HiveMetaStore.class);
+   public static final String PARTITION_NUMBER_EXCEED_LIMIT_MSG =
+       "Number of partitions scanned (=%d) on table '%s' exceeds limit (=%d). This is controlled on the metastore server by %s.";
+ 
+   // Boolean that tells whether the (remote) HiveMetaStore server is being used.
+   // Can be used to determine whether calls to the metastore API (HMSHandler) are being
+   // made against an embedded metastore or a remote one.
+   private static boolean isMetaStoreRemote = false;
+ 
+   // Used for testing to simulate method timeout.
+   @VisibleForTesting
+   static boolean TEST_TIMEOUT_ENABLED = false;
+   @VisibleForTesting
+   static long TEST_TIMEOUT_VALUE = -1;
+ 
+   private static ShutdownHookManager shutdownHookMgr;
+ 
+   public static final String ADMIN = "admin";
+   public static final String PUBLIC = "public";
+   /** MM write states. */
+   public static final char MM_WRITE_OPEN = 'o', MM_WRITE_COMMITTED = 'c', MM_WRITE_ABORTED = 'a';
+ 
+   private static HadoopThriftAuthBridge.Server saslServer;
+   private static MetastoreDelegationTokenManager delegationTokenManager;
+   private static boolean useSasl;
+ 
+   static final String NO_FILTER_STRING = "";
+   static final int UNLIMITED_MAX_PARTITIONS = -1;
+ 
+   private static final class ChainedTTransportFactory extends TTransportFactory {
+     private final TTransportFactory parentTransFactory;
+     private final TTransportFactory childTransFactory;
+ 
+     private ChainedTTransportFactory(
+         TTransportFactory parentTransFactory,
+         TTransportFactory childTransFactory) {
+       this.parentTransFactory = parentTransFactory;
+       this.childTransFactory = childTransFactory;
+     }
+ 
+     @Override
+     public TTransport getTransport(TTransport trans) {
+       return childTransFactory.getTransport(parentTransFactory.getTransport(trans));
+     }
+   }
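+ 
+   // Illustrative sketch: composing two transport decorators with ChainedTTransportFactory.
+   // The SASL/framed pairing shown here is an assumption for illustration, not necessarily how
+   // this server wires its transports.
+   private static TTransportFactory exampleChainedFactory(TTransportFactory saslFactory) {
+     // Each transport is wrapped by the parent (SASL) factory first, then framed by the child.
+     return new ChainedTTransportFactory(saslFactory, new TFramedTransport.Factory());
+   }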
+ 
+   public static boolean isRenameAllowed(Database srcDB, Database destDB) {
+     if (!srcDB.getName().equalsIgnoreCase(destDB.getName())) {
+       if (ReplChangeManager.isSourceOfReplication(srcDB) || ReplChangeManager.isSourceOfReplication(destDB)) {
+         return false;
+       }
+     }
+     return true;
+   }
+ 
+   public static class HMSHandler extends FacebookBase implements IHMSHandler {
+     public static final Logger LOG = HiveMetaStore.LOG;
+     private final Configuration conf; // stores datastore (jpox) properties,
+                                      // right now they come from jpox.properties
+ 
+     // Flag to ensure that the "always" task threads are initialized only once
+     // instead of multiple times
+     private final static AtomicBoolean alwaysThreadsInitialized =
+         new AtomicBoolean(false);
+ 
+     private static String currentUrl;
+     private FileMetadataManager fileMetadataManager;
+     private PartitionExpressionProxy expressionProxy;
+     private StorageSchemaReader storageSchemaReader;
+ 
+     // Variables for metrics
+     // Package visible so that HMSMetricsListener can see them.
+     static AtomicInteger databaseCount, tableCount, partCount;
+ 
+     private Warehouse wh; // hdfs warehouse
+     private static final ThreadLocal<RawStore> threadLocalMS =
+         new ThreadLocal<RawStore>() {
+           @Override
+           protected RawStore initialValue() {
+             return null;
+           }
+         };
+ 
+     private static final ThreadLocal<TxnStore> threadLocalTxn = new ThreadLocal<TxnStore>() {
+       @Override
+       protected TxnStore initialValue() {
+         return null;
+       }
+     };
+ 
+     private static final ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>> timerContexts =
+         new ThreadLocal<Map<String, com.codahale.metrics.Timer.Context>>() {
+       @Override
+       protected Map<String, com.codahale.metrics.Timer.Context> initialValue() {
+         return new HashMap<>();
+       }
+     };
+ 
+     public static RawStore getRawStore() {
+       return threadLocalMS.get();
+     }
+ 
+     static void removeRawStore() {
+       threadLocalMS.remove();
+     }
+ 
+     // Thread local configuration is needed as many threads could make changes
+     // to the conf using the connection hook
+     private static final ThreadLocal<Configuration> threadLocalConf =
+         new ThreadLocal<Configuration>() {
+           @Override
+           protected Configuration initialValue() {
+             return null;
+           }
+         };
+ 
+     /**
+      * Thread local HMSHandler used during shutdown to notify meta listeners
+      */
+     private static final ThreadLocal<HMSHandler> threadLocalHMSHandler = new ThreadLocal<>();
+ 
+     /**
+      * Thread local Map to keep track of modified meta conf keys
+      */
+     private static final ThreadLocal<Map<String, String>> threadLocalModifiedConfig =
+         new ThreadLocal<>();
+ 
+     private static ExecutorService threadPool;
+ 
+     static final Logger auditLog = LoggerFactory.getLogger(
+         HiveMetaStore.class.getName() + ".audit");
+ 
+     private static void logAuditEvent(String cmd) {
+       if (cmd == null) {
+         return;
+       }
+ 
+       UserGroupInformation ugi;
+       try {
+         ugi = SecurityUtils.getUGI();
+       } catch (Exception ex) {
+         throw new RuntimeException(ex);
+       }
+ 
+       String address = getIPAddress();
+       if (address == null) {
+         address = "unknown-ip-addr";
+       }
+ 
+       auditLog.info("ugi={}	ip={}	cmd={}	", ugi.getUserName(), address, cmd);
+     }
+ 
+     private static String getIPAddress() {
+       if (useSasl) {
+         if (saslServer != null && saslServer.getRemoteAddress() != null) {
+           return saslServer.getRemoteAddress().getHostAddress();
+         }
+       } else {
+         // if kerberos is not enabled
+         return getThreadLocalIpAddress();
+       }
+       return null;
+     }
+ 
+     private static AtomicInteger nextSerialNum = new AtomicInteger();
+     private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() {
+       @Override
+       protected Integer initialValue() {
+         return nextSerialNum.getAndIncrement();
+       }
+     };
+ 
+     // This will only be set if the metastore is being accessed from a metastore Thrift server,
+     // not if it is from the CLI. Also, only if the TTransport being used to connect is an
+     // instance of TSocket. This is also not set when kerberos is used.
+     private static ThreadLocal<String> threadLocalIpAddress = new ThreadLocal<String>() {
+       @Override
+       protected String initialValue() {
+         return null;
+       }
+     };
+ 
+     /**
+      * Internal function to notify listeners for meta config change events
+      */
+     private void notifyMetaListeners(String key, String oldValue, String newValue) throws MetaException {
+       for (MetaStoreEventListener listener : listeners) {
+         listener.onConfigChange(new ConfigChangeEvent(this, key, oldValue, newValue));
+       }
+ 
+       if (transactionalListeners.size() > 0) {
+         // All the fields of this event are final, so no reason to create a new one for each
+         // listener
+         ConfigChangeEvent cce = new ConfigChangeEvent(this, key, oldValue, newValue);
+         for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+           transactionalListener.onConfigChange(cce);
+         }
+       }
+     }
+ 
+     /**
+      * Internal function to notify listeners to revert to the old values of keys
+      * that were modified during setMetaConf. This is called from HiveMetaStore#cleanupRawStore.
+      */
+     private void notifyMetaListenersOnShutDown() {
+       Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
+       if (modifiedConf == null) {
+         // Nothing got modified
+         return;
+       }
+       try {
+         Configuration conf = threadLocalConf.get();
+         if (conf == null) {
+           throw new MetaException("Unexpected: modifiedConf is non-null but conf is null");
+         }
+         // Notify listeners of the changed value
+         for (Entry<String, String> entry : modifiedConf.entrySet()) {
+           String key = entry.getKey();
+           // curr value becomes old and vice-versa
+           String currVal = entry.getValue();
+           String oldVal = conf.get(key);
+           if (!Objects.equals(oldVal, currVal)) {
+             notifyMetaListeners(key, oldVal, currVal);
+           }
+         }
+         logInfo("Meta listeners shutdown notification completed.");
+       } catch (MetaException e) {
+         LOG.error("Failed to notify meta listeners on shutdown: ", e);
+       }
+     }
+ 
+     static void setThreadLocalIpAddress(String ipAddress) {
+       threadLocalIpAddress.set(ipAddress);
+     }
+ 
+     // This will return null if the metastore is not being accessed from a metastore Thrift server,
+     // or if the TTransport being used to connect is not an instance of TSocket, or if kerberos
+     // is used
+     static String getThreadLocalIpAddress() {
+       return threadLocalIpAddress.get();
+     }
+ 
+     // Make it possible for tests to check that the right type of PartitionExpressionProxy was
+     // instantiated.
+     @VisibleForTesting
+     PartitionExpressionProxy getExpressionProxy() {
+       return expressionProxy;
+     }
+ 
+     /**
+      * Use {@link #getThreadId()} instead.
+      * @return thread id
+      */
+     @Deprecated
+     public static Integer get() {
+       return threadLocalId.get();
+     }
+ 
+     @Override
+     public int getThreadId() {
+       return threadLocalId.get();
+     }
+ 
+     public HMSHandler(String name) throws MetaException {
+       this(name, MetastoreConf.newMetastoreConf(), true);
+     }
+ 
+     public HMSHandler(String name, Configuration conf) throws MetaException {
+       this(name, conf, true);
+     }
+ 
+     public HMSHandler(String name, Configuration conf, boolean init) throws MetaException {
+       super(name);
+       this.conf = conf;
+       isInTest = MetastoreConf.getBoolVar(this.conf, ConfVars.HIVE_IN_TEST);
+       if (threadPool == null) {
+         synchronized (HMSHandler.class) {
+           int numThreads = MetastoreConf.getIntVar(conf, ConfVars.FS_HANDLER_THREADS_COUNT);
+           threadPool = Executors.newFixedThreadPool(numThreads,
+               new ThreadFactoryBuilder().setDaemon(true)
+                   .setNameFormat("HMSHandler #%d").build());
+         }
+       }
+       if (init) {
+         init();
+       }
+     }
+ 
+     /**
+      * Use {@link #getConf()} instead.
+      * @return Configuration object
+      */
+     @Deprecated
+     public Configuration getHiveConf() {
+       return conf;
+     }
+ 
+     private ClassLoader classLoader;
+     private AlterHandler alterHandler;
+     private List<MetaStorePreEventListener> preListeners;
+     private List<MetaStoreEventListener> listeners;
+     private List<TransactionalMetaStoreEventListener> transactionalListeners;
+     private List<MetaStoreEndFunctionListener> endFunctionListeners;
+     private List<MetaStoreInitListener> initListeners;
+     private Pattern partitionValidationPattern;
+     private final boolean isInTest;
+ 
+     {
+       classLoader = Thread.currentThread().getContextClassLoader();
+       if (classLoader == null) {
+         classLoader = Configuration.class.getClassLoader();
+       }
+     }
+ 
+     @Override
+     public List<TransactionalMetaStoreEventListener> getTransactionalListeners() {
+       return transactionalListeners;
+     }
+ 
+     @Override
+     public List<MetaStoreEventListener> getListeners() {
+       return listeners;
+     }
+ 
+     @Override
+     public void init() throws MetaException {
+       initListeners = MetaStoreUtils.getMetaStoreListeners(
+           MetaStoreInitListener.class, conf, MetastoreConf.getVar(conf, ConfVars.INIT_HOOKS));
+       for (MetaStoreInitListener singleInitListener : initListeners) {
+         MetaStoreInitContext context = new MetaStoreInitContext();
+         singleInitListener.onInit(context);
+       }
+ 
+       String alterHandlerName = MetastoreConf.getVar(conf, ConfVars.ALTER_HANDLER);
+       alterHandler = ReflectionUtils.newInstance(JavaUtils.getClass(
+           alterHandlerName, AlterHandler.class), conf);
+       wh = new Warehouse(conf);
+ 
+       synchronized (HMSHandler.class) {
+         if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(conf))) {
+           createDefaultDB();
+           createDefaultRoles();
+           addAdminUsers();
+           currentUrl = MetaStoreInit.getConnectionURL(conf);
+         }
+       }
+ 
+       //Start Metrics
+       if (MetastoreConf.getBoolVar(conf, ConfVars.METRICS_ENABLED)) {
+         LOG.info("Begin calculating metadata count metrics.");
+         Metrics.initialize(conf);
+         databaseCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_DATABASES);
+         tableCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_TABLES);
+         partCount = Metrics.getOrCreateGauge(MetricsConstants.TOTAL_PARTITIONS);
+         updateMetrics();
+ 
+       }
+ 
+       preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class,
+           conf, MetastoreConf.getVar(conf, ConfVars.PRE_EVENT_LISTENERS));
+       preListeners.add(0, new TransactionalValidationListener(conf));
+       listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, conf,
+           MetastoreConf.getVar(conf, ConfVars.EVENT_LISTENERS));
+       listeners.add(new SessionPropertiesListener(conf));
+       listeners.add(new AcidEventListener(conf));
+       transactionalListeners = MetaStoreUtils.getMetaStoreListeners(TransactionalMetaStoreEventListener.class,
+           conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS));
+       if (Metrics.getRegistry() != null) {
+         listeners.add(new HMSMetricsListener(conf));
+       }
+ 
+       endFunctionListeners = MetaStoreUtils.getMetaStoreListeners(
+           MetaStoreEndFunctionListener.class, conf, MetastoreConf.getVar(conf, ConfVars.END_FUNCTION_LISTENERS));
+ 
+       String partitionValidationRegex =
+           MetastoreConf.getVar(conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
+       if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
+         partitionValidationPattern = Pattern.compile(partitionValidationRegex);
+       } else {
+         partitionValidationPattern = null;
+       }
+ 
+       // We initialize the tasks that need to run periodically only once
+       if (alwaysThreadsInitialized.compareAndSet(false, true)) {
+         ThreadPool.initialize(conf);
+         Collection<String> taskNames =
+             MetastoreConf.getStringCollection(conf, ConfVars.TASK_THREADS_ALWAYS);
+         for (String taskName : taskNames) {
+           MetastoreTaskThread task =
+               JavaUtils.newInstance(JavaUtils.getClass(taskName, MetastoreTaskThread.class));
+           task.setConf(conf);
+           long freq = task.runFrequency(TimeUnit.MILLISECONDS);
+           // For backwards compatibility: some threads used to be hard coded, but they only ran
+           // if their frequency was > 0
+           if (freq > 0) {
+             ThreadPool.getPool().scheduleAtFixedRate(task, freq, freq, TimeUnit.MILLISECONDS);
+           }
+         }
+       }
+       expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
+       fileMetadataManager = new FileMetadataManager(this.getMS(), conf);
+     }
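+ 
+     // Illustrative sketch of the MetastoreTaskThread contract used by init() above: a periodic
+     // task implements Configurable + Runnable plus runFrequency(). The class name, body and
+     // 60-second frequency below are hypothetical.
+     public static class ExampleHousekeepingTask implements MetastoreTaskThread {
+       private Configuration conf;
+       @Override public void setConf(Configuration conf) { this.conf = conf; }
+       @Override public Configuration getConf() { return conf; }
+       @Override public long runFrequency(TimeUnit unit) { return unit.convert(60, TimeUnit.SECONDS); }
+       @Override public void run() { LOG.info("Example housekeeping pass; no-op."); }
+     }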
+ 
+     private static String addPrefix(String s) {
+       return threadLocalId.get() + ": " + s;
+     }
+ 
+     /**
+      * Set copy of invoking HMSHandler on thread local
+      */
+     private static void setHMSHandler(HMSHandler handler) {
+       if (threadLocalHMSHandler.get() == null) {
+         threadLocalHMSHandler.set(handler);
+       }
+     }
+     @Override
+     public void setConf(Configuration conf) {
+       threadLocalConf.set(conf);
+       RawStore ms = threadLocalMS.get();
+       if (ms != null) {
+         ms.setConf(conf); // reload if DS related configuration is changed
+       }
+     }
+ 
+     @Override
+     public Configuration getConf() {
+       Configuration conf = threadLocalConf.get();
+       if (conf == null) {
+         conf = new Configuration(this.conf);
+         threadLocalConf.set(conf);
+       }
+       return conf;
+     }
+ 
+     private Map<String, String> getModifiedConf() {
+       Map<String, String> modifiedConf = threadLocalModifiedConfig.get();
+       if (modifiedConf == null) {
+         modifiedConf = new HashMap<>();
+         threadLocalModifiedConfig.set(modifiedConf);
+       }
+       return modifiedConf;
+     }
+ 
+     @Override
+     public Warehouse getWh() {
+       return wh;
+     }
+ 
+     @Override
+     public void setMetaConf(String key, String value) throws MetaException {
+       ConfVars confVar = MetastoreConf.getMetaConf(key);
+       if (confVar == null) {
+         throw new MetaException("Invalid configuration key " + key);
+       }
+       try {
+         confVar.validate(value);
+       } catch (IllegalArgumentException e) {
+         throw new MetaException("Invalid configuration value " + value + " for key " + key +
+             " by " + e.getMessage());
+       }
+       Configuration configuration = getConf();
+       String oldValue = MetastoreConf.get(configuration, key);
+       // Save prev val of the key on threadLocal
+       Map<String, String> modifiedConf = getModifiedConf();
+       if (!modifiedConf.containsKey(key)) {
+         modifiedConf.put(key, oldValue);
+       }
+       // Set invoking HMSHandler on threadLocal, this will be used later to notify
+       // metaListeners in HiveMetaStore#cleanupRawStore
+       setHMSHandler(this);
+       configuration.set(key, value);
+       notifyMetaListeners(key, oldValue, value);
+ 
+       if (ConfVars.TRY_DIRECT_SQL == confVar) {
+         HMSHandler.LOG.info("Direct SQL optimization = {}", value);
+       }
+     }
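+ 
+     // Illustrative sketch: what a client-side session override of a meta conf key looks like.
+     // IMetaStoreClient.setMetaConf/getMetaConf are assumed from the client API, and the key
+     // string below is an assumption for illustration.
+     //
+     //   IMetaStoreClient client = new HiveMetaStoreClient(conf);
+     //   client.setMetaConf("metastore.try.direct.sql", "false"); // validated against MetastoreConf
+     //   assert "false".equals(client.getMetaConf("metastore.try.direct.sql"));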
+ 
+     @Override
+     public String getMetaConf(String key) throws MetaException {
+       ConfVars confVar = MetastoreConf.getMetaConf(key);
+       if (confVar == null) {
+         throw new MetaException("Invalid configuration key " + key);
+       }
+       return getConf().get(key, confVar.getDefaultVal().toString());
+     }
+ 
+     /**
+      * Get a cached RawStore.
+      *
+      * @return the cached RawStore
+      * @throws MetaException
+      */
+     @Override
+     public RawStore getMS() throws MetaException {
+       Configuration conf = getConf();
+       return getMSForConf(conf);
+     }
+ 
+     public static RawStore getMSForConf(Configuration conf) throws MetaException {
+       RawStore ms = threadLocalMS.get();
+       if (ms == null) {
+         ms = newRawStoreForConf(conf);
+         ms.verifySchema();
+         threadLocalMS.set(ms);
+         ms = threadLocalMS.get();
+       }
+       return ms;
+     }
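+ 
+     // Illustrative sketch of the thread-local caching above: the first call on a worker thread
+     // opens and schema-verifies a RawStore; later calls on the same thread reuse it.
+     //
+     //   RawStore a = HMSHandler.getMSForConf(conf); // opens store, runs verifySchema()
+     //   RawStore b = HMSHandler.getMSForConf(conf); // same thread, same cached instance
+     //   assert a == b;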
+ 
+     @Override
+     public TxnStore getTxnHandler() {
++      return getMsThreadTxnHandler(conf);
++    }
++
++    public static TxnStore getMsThreadTxnHandler(Configuration conf) {
+       TxnStore txn = threadLocalTxn.get();
+       if (txn == null) {
+         txn = TxnUtils.getTxnStore(conf);
+         threadLocalTxn.set(txn);
+       }
+       return txn;
+     }
+ 
+     static RawStore newRawStoreForConf(Configuration conf) throws MetaException {
+       Configuration newConf = new Configuration(conf);
+       String rawStoreClassName = MetastoreConf.getVar(newConf, ConfVars.RAW_STORE_IMPL);
+       LOG.info(addPrefix("Opening raw store with implementation class: " + rawStoreClassName));
+       return RawStoreProxy.getProxy(newConf, conf, rawStoreClassName, threadLocalId.get());
+     }
+ 
+     @VisibleForTesting
+     public static void createDefaultCatalog(RawStore ms, Warehouse wh) throws MetaException,
+         InvalidOperationException {
+       try {
+         Catalog defaultCat = ms.getCatalog(DEFAULT_CATALOG_NAME);
+         // Null check because in some test cases we get a null from ms.getCatalog.
+         if (defaultCat != null && defaultCat.getLocationUri().equals("TBD")) {
+           // One time update issue.  When the new 'hive' catalog is created in an upgrade the
+           // script does not know the location of the warehouse.  So we need to update it.
+           LOG.info("Setting location of default catalog, as it hasn't been done after upgrade");
+           defaultCat.setLocationUri(wh.getWhRoot().toString());
+           ms.alterCatalog(defaultCat.getName(), defaultCat);
+         }
+ 
+       } catch (NoSuchObjectException e) {
+         Catalog cat = new Catalog(DEFAULT_CATALOG_NAME, wh.getWhRoot().toString());
+         cat.setDescription(Warehouse.DEFAULT_CATALOG_COMMENT);
+         ms.createCatalog(cat);
+       }
+     }
+ 
+     private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException {
+       try {
+         ms.getDatabase(DEFAULT_CATALOG_NAME, DEFAULT_DATABASE_NAME);
+       } catch (NoSuchObjectException e) {
+         Database db = new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT,
+           wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null);
+         db.setOwnerName(PUBLIC);
+         db.setOwnerType(PrincipalType.ROLE);
+         db.setCatalogName(DEFAULT_CATALOG_NAME);
+         ms.createDatabase(db);
+       }
+     }
+ 
+     /**
+      * Create the default database if it doesn't exist.
+      *
+      * There is potential contention when HiveServer2 (using an embedded metastore) and a
+      * Metastore Server concurrently invoke createDefaultDB. If one invocation fails with a
+      * JDOException, the failure is logged as a warning and the call is retried once, on the
+      * assumption that the concurrent invocation succeeded.
+      *
+      * @throws MetaException
+      */
+     private void createDefaultDB() throws MetaException {
+       try {
+         RawStore ms = getMS();
+         createDefaultCatalog(ms, wh);
+         createDefaultDB_core(ms);
+       } catch (JDOException e) {
+         LOG.warn("Retrying creating default database after error: " + e.getMessage(), e);
+         try {
+           createDefaultDB_core(getMS());
+         } catch (InvalidObjectException e1) {
+           throw new MetaException(e1.getMessage());
+         }
+       } catch (InvalidObjectException|InvalidOperationException e) {
+         throw new MetaException(e.getMessage());
+       }
+     }
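+ 
+     // Illustrative sketch of the retry-once pattern shared by createDefaultDB and the
+     // createDefaultRoles/addAdminUsers methods below: a concurrent bootstrap from another server
+     // surfaces as a JDOException, so the step is retried once. This helper is hypothetical and
+     // not used by those methods.
+     private static void retryOnceOnJdoError(Runnable step) {
+       try {
+         step.run();
+       } catch (JDOException e) {
+         LOG.warn("Retrying bootstrap step after concurrent-init error: " + e.getMessage(), e);
+         step.run(); // a second failure propagates to the caller
+       }
+     }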
+ 
+     /**
+      * Create the default roles if they don't exist.
+      *
+      * There is potential contention when HiveServer2 (using an embedded metastore) and a
+      * Metastore Server concurrently invoke createDefaultRoles. If one invocation fails with a
+      * JDOException, the failure is logged as a warning and the call is retried once, on the
+      * assumption that the concurrent invocation succeeded.
+      *
+      * @throws MetaException
+      */
+     private void createDefaultRoles() throws MetaException {
+       try {
+         createDefaultRoles_core();
+       } catch (JDOException e) {
+         LOG.warn("Retrying creating default roles after error: " + e.getMessage(), e);
+         createDefaultRoles_core();
+       }
+     }
+ 
+     private void createDefaultRoles_core() throws MetaException {
+ 
+       RawStore ms = getMS();
+       try {
+         ms.addRole(ADMIN, ADMIN);
+       } catch (InvalidObjectException e) {
+         LOG.debug(ADMIN +" role already exists",e);
+       } catch (NoSuchObjectException e) {
+         // This should never be thrown.
+         LOG.warn("Unexpected exception while adding " +ADMIN+" roles" , e);
+       }
+       LOG.info("Added "+ ADMIN+ " role in metastore");
+       try {
+         ms.addRole(PUBLIC, PUBLIC);
+       } catch (InvalidObjectException e) {
+         LOG.debug(PUBLIC + " role already exists",e);
+       } catch (NoSuchObjectException e) {
+         // This should never be thrown.
+         LOG.warn("Unexpected exception while adding "+PUBLIC +" roles" , e);
+       }
+       LOG.info("Added "+PUBLIC+ " role in metastore");
+       // now grant all privs to admin
+       PrivilegeBag privs = new PrivilegeBag();
+       privs.addToPrivileges(new HiveObjectPrivilege( new HiveObjectRef(HiveObjectType.GLOBAL, null,
+         null, null, null), ADMIN, PrincipalType.ROLE, new PrivilegeGrantInfo("All", 0, ADMIN,
+           PrincipalType.ROLE, true), "SQL"));
+       try {
+         ms.grantPrivileges(privs);
+       } catch (InvalidObjectException e) {
+         // Surprisingly these privs are already granted.
+         LOG.debug("Failed while granting global privs to admin", e);
+       } catch (NoSuchObjectException e) {
+         // Unlikely to be thrown.
+         LOG.warn("Failed while granting global privs to admin", e);
+       }
+     }
+ 
+     /**
+      * Add admin users if they don't exist.
+      *
+      * There is potential contention when HiveServer2 (using an embedded metastore) and a
+      * Metastore Server concurrently invoke addAdminUsers. If one invocation fails with a
+      * JDOException, the failure is logged as a warning and the call is retried once, on the
+      * assumption that the concurrent invocation succeeded.
+      *
+      * @throws MetaException
+      */
+     private void addAdminUsers() throws MetaException {
+       try {
+         addAdminUsers_core();
+       } catch (JDOException e) {
+         LOG.warn("Retrying adding admin users after error: " + e.getMessage(), e);
+         addAdminUsers_core();
+       }
+     }
+ 
+     private void addAdminUsers_core() throws MetaException {
+ 
+       // now add pre-configured users to admin role
+       String userStr = MetastoreConf.getVar(conf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim();
+       if (userStr.isEmpty()) {
+         LOG.info("No user is added in admin role, since config is empty");
+         return;
+       }
+       // Since user names need to be valid unix user names, per IEEE Std 1003.1-2001 they cannot
+       // contain a comma, so we can safely split the above string on commas.
+ 
+       Iterator<String> users = Splitter.on(",").trimResults().omitEmptyStrings().split(userStr).iterator();
+       if (!users.hasNext()) {
+         LOG.info("No user is added in admin role, since config value "+ userStr +
+           " is in incorrect format. We accept comma separated list of users.");
+         return;
+       }
+       Role adminRole;
+       RawStore ms = getMS();
+       try {
+         adminRole = ms.getRole(ADMIN);
+       } catch (NoSuchObjectException e) {
+         LOG.error("Failed to retrieve just added admin role",e);
+         return;
+       }
+       while (users.hasNext()) {
+         String userName = users.next();
+         try {
+           ms.grantRole(adminRole, userName, PrincipalType.USER, ADMIN, PrincipalType.ROLE, true);
+           LOG.info("Added " + userName + " to admin role");
+         } catch (NoSuchObjectException e) {
+           LOG.error("Failed to add "+ userName + " in admin role",e);
+         } catch (InvalidObjectException e) {
+           LOG.debug(userName + " already in admin role", e);
+         }
+       }
+     }
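+ 
+     // Illustrative sketch: how the Guava Splitter above tokenizes the admin-user list. The
+     // sample config value is hypothetical.
+     //
+     //   Splitter.on(",").trimResults().omitEmptyStrings().split("hive, ,admin,")
+     //   // -> ["hive", "admin"]: whitespace trimmed, empty entries dropped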
+ 
+     private static void logInfo(String m) {
+       LOG.info(threadLocalId.get().toString() + ": " + m);
+       logAuditEvent(m);
+     }
+ 
+     private String startFunction(String function, String extraLogInfo) {
+       incrementCounter(function);
+       logInfo((getThreadLocalIpAddress() == null ? "" : "source:" + getThreadLocalIpAddress() + " ") +
+           function + extraLogInfo);
+       com.codahale.metrics.Timer timer =
+           Metrics.getOrCreateTimer(MetricsConstants.API_PREFIX + function);
+       if (timer != null) {
+         // Timer will be null if we aren't using metrics
+         timerContexts.get().put(function, timer.time());
+       }
+       Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+       if (counter != null) {
+         counter.inc();
+       }
+       return function;
+     }
+ 
+     private String startFunction(String function) {
+       return startFunction(function, "");
+     }
+ 
+     private void startTableFunction(String function, String catName, String db, String tbl) {
+       startFunction(function, " : tbl=" +
+           TableName.getQualified(catName, db, tbl));
+     }
+ 
+     private void startMultiTableFunction(String function, String db, List<String> tbls) {
+       String tableNames = join(tbls, ",");
+       startFunction(function, " : db=" + db + " tbls=" + tableNames);
+     }
+ 
+     private void startPartitionFunction(String function, String cat, String db, String tbl,
+                                         List<String> partVals) {
+       startFunction(function, " : tbl=" +
+           TableName.getQualified(cat, db, tbl) + "[" + join(partVals, ",") + "]");
+     }
+ 
+     private void startPartitionFunction(String function, String catName, String db, String tbl,
+                                         Map<String, String> partName) {
+       startFunction(function, " : tbl=" +
+           TableName.getQualified(catName, db, tbl) + " partition=" + partName);
+     }
+ 
+     private void endFunction(String function, boolean successful, Exception e) {
+       endFunction(function, successful, e, null);
+     }
+     private void endFunction(String function, boolean successful, Exception e,
+                             String inputTableName) {
+       endFunction(function, new MetaStoreEndFunctionContext(successful, e, inputTableName));
+     }
+ 
+     private void endFunction(String function, MetaStoreEndFunctionContext context) {
+       com.codahale.metrics.Timer.Context timerContext = timerContexts.get().remove(function);
+       if (timerContext != null) {
+         timerContext.close();
+       }
+       Counter counter = Metrics.getOrCreateCounter(MetricsConstants.ACTIVE_CALLS + function);
+       if (counter != null) {
+         counter.dec();
+       }
+ 
+       for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+         listener.onEndFunction(function, context);
+       }
+     }
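+ 
+     // Illustrative sketch of the startFunction/endFunction pairing around every Thrift API call:
+     // startFunction opens a Codahale timer context and bumps the active-calls counter, and
+     // endFunction closes and decrements them. The function name is hypothetical.
+     //
+     //   String fn = startFunction("get_table");                  // timer started, ACTIVE_CALLS+1
+     //   try { /* do work */ } finally { endFunction(fn, true, null); } // timer closed, counter-1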
+ 
+     @Override
+     public fb_status getStatus() {
+       return fb_status.ALIVE;
+     }
+ 
+     @Override
+     public void shutdown() {
+       cleanupRawStore();
+       PerfLogger.getPerfLogger(false).cleanupPerfLogMetrics();
+     }
+ 
+     @Override
+     public AbstractMap<String, Long> getCounters() {
+       AbstractMap<String, Long> counters = super.getCounters();
+ 
+       // Allow endFunctionListeners to add any counters they have collected
+       if (endFunctionListeners != null) {
+         for (MetaStoreEndFunctionListener listener : endFunctionListeners) {
+           listener.exportCounters(counters);
+         }
+       }
+ 
+       return counters;
+     }
+ 
+     @Override
+     public void create_catalog(CreateCatalogRequest rqst)
+         throws AlreadyExistsException, InvalidObjectException, MetaException {
+       Catalog catalog = rqst.getCatalog();
+       startFunction("create_catalog", ": " + catalog.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         try {
+           getMS().getCatalog(catalog.getName());
+           throw new AlreadyExistsException("Catalog " + catalog.getName() + " already exists");
+         } catch (NoSuchObjectException e) {
+           // expected
+         }
+ 
+         if (!MetaStoreUtils.validateName(catalog.getName(), null)) {
+           throw new InvalidObjectException(catalog.getName() + " is not a valid catalog name");
+         }
+ 
+         if (catalog.getLocationUri() == null) {
+           throw new InvalidObjectException("You must specify a path for the catalog");
+         }
+ 
+         RawStore ms = getMS();
+         Path catPath = new Path(catalog.getLocationUri());
+         boolean madeDir = false;
+         Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+         try {
+           firePreEvent(new PreCreateCatalogEvent(this, catalog));
+           if (!wh.isDir(catPath)) {
+             if (!wh.mkdirs(catPath)) {
+               throw new MetaException("Unable to create catalog path " + catPath +
+                   ", failed to create catalog " + catalog.getName());
+             }
+             madeDir = true;
+           }
+ 
+           ms.openTransaction();
+           ms.createCatalog(catalog);
+ 
+           // Create a default database inside the catalog
+           Database db = new Database(DEFAULT_DATABASE_NAME, "Default database for catalog " +
+                            catalog.getName(), catalog.getLocationUri(), Collections.emptyMap());
+           db.setCatalogName(catalog.getName());
+           create_database_core(ms, db);
+ 
+           if (!transactionalListeners.isEmpty()) {
+             transactionalListenersResponses =
+                 MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                     EventType.CREATE_CATALOG,
+                     new CreateCatalogEvent(true, this, catalog));
+           }
+ 
+           success = ms.commitTransaction();
+         } finally {
+           if (!success) {
+             ms.rollbackTransaction();
+             if (madeDir) {
+               wh.deleteDir(catPath, true, false, false);
+             }
+           }
+ 
+           if (!listeners.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners,
+                 EventType.CREATE_CATALOG,
+                 new CreateCatalogEvent(success, this, catalog),
+                 null,
+                 transactionalListenersResponses, ms);
+           }
+         }
+         success = true;
+       } catch (AlreadyExistsException|InvalidObjectException|MetaException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("create_catalog", success, ex);
+       }
+     }
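+ 
+     // Illustrative sketch: exercising create_catalog through the Thrift client. The
+     // IMetaStoreClient.createCatalog call is assumed from the standalone-metastore client API,
+     // and the catalog name and location are hypothetical.
+     //
+     //   IMetaStoreClient client = new HiveMetaStoreClient(conf);
+     //   client.createCatalog(new Catalog("analytics", "hdfs://nn:8020/warehouse/analytics"));
+     //   // a 'default' database is created inside the new catalog as part of the call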
+ 
+     @Override
+     public void alter_catalog(AlterCatalogRequest rqst) throws TException {
+       startFunction("alter_catalog " + rqst.getName());
+       boolean success = false;
+       Exception ex = null;
+       RawStore ms = getMS();
+       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+       GetCatalogResponse oldCat = null;
+ 
+       try {
+         oldCat = get_catalog(new GetCatalogRequest(rqst.getName()));
+         // Above should have thrown NoSuchObjectException if there is no such catalog
+         assert oldCat != null && oldCat.getCatalog() != null;
+         firePreEvent(new PreAlterCatalogEvent(oldCat.getCatalog(), rqst.getNewCat(), this));
+ 
+         ms.openTransaction();
+         ms.alterCatalog(rqst.getName(), rqst.getNewCat());
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenersResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventType.ALTER_CATALOG,
+                   new AlterCatalogEvent(oldCat.getCatalog(), rqst.getNewCat(), true, this));
+         }
+ 
+         success = ms.commitTransaction();
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         }
+ 
+         if ((null != oldCat) && (!listeners.isEmpty())) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+               EventType.ALTER_CATALOG,
+               new AlterCatalogEvent(oldCat.getCatalog(), rqst.getNewCat(), success, this),
+               null, transactionalListenersResponses, ms);
+         }
+         endFunction("alter_catalog", success, ex);
+       }
+ 
+     }
+ 
+     @Override
+     public GetCatalogResponse get_catalog(GetCatalogRequest rqst)
+         throws NoSuchObjectException, TException {
+       String catName = rqst.getName();
+       startFunction("get_catalog", ": " + catName);
+       Catalog cat = null;
+       Exception ex = null;
+       try {
+         cat = getMS().getCatalog(catName);
+         firePreEvent(new PreReadCatalogEvent(this, cat));
+         return new GetCatalogResponse(cat);
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("get_database", cat != null, ex);
+       }
+     }
+ 
+     @Override
+     public GetCatalogsResponse get_catalogs() throws MetaException {
+       startFunction("get_catalogs");
+ 
+       List<String> ret = null;
+       Exception ex = null;
+       try {
+         ret = getMS().getCatalogs();
+       } catch (MetaException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("get_catalog", ret != null, ex);
+       }
+       return new GetCatalogsResponse(ret == null ? Collections.emptyList() : ret);
+ 
+     }
+ 
+     @Override
+     public void drop_catalog(DropCatalogRequest rqst)
+         throws NoSuchObjectException, InvalidOperationException, MetaException {
+       String catName = rqst.getName();
+       startFunction("drop_catalog", ": " + catName);
+       if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(catName)) {
+         endFunction("drop_catalog", false, null);
+         throw new MetaException("Can not drop " + DEFAULT_CATALOG_NAME + " catalog");
+       }
+ 
+       boolean success = false;
+       Exception ex = null;
+       try {
+         dropCatalogCore(catName);
+         success = true;
+       } catch (NoSuchObjectException|InvalidOperationException|MetaException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("drop_catalog", success, ex);
+       }
+ 
+     }
+ 
+     private void dropCatalogCore(String catName)
+         throws MetaException, NoSuchObjectException, InvalidOperationException {
+       boolean success = false;
+       Catalog cat = null;
+       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+       RawStore ms = getMS();
+       try {
+         ms.openTransaction();
+         cat = ms.getCatalog(catName);
+ 
+         firePreEvent(new PreDropCatalogEvent(this, cat));
+ 
+         List<String> allDbs = get_databases(prependNotNullCatToDbName(catName, null));
+         if (allDbs != null && !allDbs.isEmpty()) {
+           // It might just be the default, in which case we can drop that one if it's empty
+           if (allDbs.size() == 1 && allDbs.get(0).equals(DEFAULT_DATABASE_NAME)) {
+             try {
+               drop_database_core(ms, catName, DEFAULT_DATABASE_NAME, true, false);
+             } catch (InvalidOperationException e) {
+               // This means there are still tables or other objects in the database
+               throw new InvalidOperationException("There are still objects in the default " +
+                   "database for catalog " + catName);
+             } catch (InvalidObjectException|IOException|InvalidInputException e) {
+               MetaException me = new MetaException("Error attempt to drop default database for " +
+                   "catalog " + catName);
+               me.initCause(e);
+               throw me;
+             }
+           } else {
+             throw new InvalidOperationException("There are non-default databases in the catalog " +
+                 catName + " so it cannot be dropped.");
+           }
+         }
+ 
+         ms.dropCatalog(catName);
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenerResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventType.DROP_CATALOG,
+                   new DropCatalogEvent(true, this, cat));
+         }
+ 
+         success = ms.commitTransaction();
+       } finally {
+         if (success) {
+           wh.deleteDir(wh.getDnsPath(new Path(cat.getLocationUri())), false, false, false);
+         } else {
+           ms.rollbackTransaction();
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+               EventType.DROP_CATALOG,
+               new DropCatalogEvent(success, this, cat),
+               null,
+               transactionalListenerResponses, ms);
+         }
+       }
+     }
+ 
+ 
+     // Assumes that the catalog has already been set.
+     private void create_database_core(RawStore ms, final Database db)
+         throws AlreadyExistsException, InvalidObjectException, MetaException {
+       if (!MetaStoreUtils.validateName(db.getName(), null)) {
+         throw new InvalidObjectException(db.getName() + " is not a valid database name");
+       }
+ 
+       Catalog cat = null;
+       try {
+         cat = getMS().getCatalog(db.getCatalogName());
+       } catch (NoSuchObjectException e) {
+         LOG.error("No such catalog " + db.getCatalogName());
+         throw new InvalidObjectException("No such catalog " + db.getCatalogName());
+       }
+       Path dbPath = wh.determineDatabasePath(cat, db);
+       db.setLocationUri(dbPath.toString());
+ 
+       boolean success = false;
+       boolean madeDir = false;
+       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+       try {
+         firePreEvent(new PreCreateDatabaseEvent(db, this));
+         if (!wh.isDir(dbPath)) {
+           LOG.debug("Creating database path " + dbPath);
+           if (!wh.mkdirs(dbPath)) {
+             throw new MetaException("Unable to create database path " + dbPath +
+                 ", failed to create database " + db.getName());
+           }
+           madeDir = true;
+         }
+ 
+         ms.openTransaction();
+         ms.createDatabase(db);
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenersResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                     EventType.CREATE_DATABASE,
+                                                     new CreateDatabaseEvent(db, true, this));
+         }
+ 
+         success = ms.commitTransaction();
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+           if (madeDir) {
+             wh.deleteDir(dbPath, true, db);
+           }
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+                                                 EventType.CREATE_DATABASE,
+                                                 new CreateDatabaseEvent(db, success, this),
+                                                 null,
+                                                 transactionalListenersResponses, ms);
+         }
+       }
+     }
+ 
+     @Override
+     public void create_database(final Database db)
+         throws AlreadyExistsException, InvalidObjectException, MetaException {
+       startFunction("create_database", ": " + db.toString());
+       boolean success = false;
+       Exception ex = null;
+       if (!db.isSetCatalogName()) {
+         db.setCatalogName(getDefaultCatalog(conf));
+       }
+       try {
+         try {
+           if (null != get_database_core(db.getCatalogName(), db.getName())) {
+             throw new AlreadyExistsException("Database " + db.getName() + " already exists");
+           }
+         } catch (NoSuchObjectException e) {
+           // expected
+         }
+ 
+         if (TEST_TIMEOUT_ENABLED) {
+           try {
+             Thread.sleep(TEST_TIMEOUT_VALUE);
+           } catch (InterruptedException e) {
+             // do nothing
+           }
+           Deadline.checkTimeout();
+         }
+         create_database_core(getMS(), db);
+         success = true;
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_database", success, ex);
+       }
+     }
+ 
+     @Override
+     public Database get_database(final String name) throws NoSuchObjectException, MetaException {
+       startFunction("get_database", ": " + name);
+       Database db = null;
+       Exception ex = null;
+       try {
+         String[] parsedDbName = parseDbName(name, conf);
+         db = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+         firePreEvent(new PreReadDatabaseEvent(db, this));
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         endFunction("get_database", db != null, ex);
+       }
+       return db;
+     }
+ 
+     @Override
+     public Database get_database_core(String catName, final String name) throws NoSuchObjectException, MetaException {
+       Database db = null;
+       if (name == null) {
+         throw new MetaException("Database name cannot be null.");
+       }
+       try {
+         db = getMS().getDatabase(catName, name);
+       } catch (MetaException | NoSuchObjectException e) {
+         throw e;
+       } catch (Exception e) {
+         assert (e instanceof RuntimeException);
+         throw (RuntimeException) e;
+       }
+       return db;
+     }
+ 
+     @Override
+     public void alter_database(final String dbName, final Database newDB) throws TException {
+       startFunction("alter_database " + dbName);
+       boolean success = false;
+       Exception ex = null;
+       RawStore ms = getMS();
+       Database oldDB = null;
+       Map<String, String> transactionalListenersResponses = Collections.emptyMap();
+ 
+       // Perform the same URI normalization as create_database_core.
+       if (newDB.getLocationUri() != null) {
+         newDB.setLocationUri(wh.getDnsPath(new Path(newDB.getLocationUri())).toString());
+       }
+ 
+       String[] parsedDbName = parseDbName(dbName, conf);
+ 
+       try {
+         oldDB = get_database_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+         if (oldDB == null) {
+           throw new MetaException("Could not alter database \"" + parsedDbName[DB_NAME] +
+               "\". Could not retrieve old definition.");
+         }
+         firePreEvent(new PreAlterDatabaseEvent(oldDB, newDB, this));
+ 
+         ms.openTransaction();
+         ms.alterDatabase(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], newDB);
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenersResponses =
+               MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventType.ALTER_DATABASE,
+                   new AlterDatabaseEvent(oldDB, newDB, true, this));
+         }
+ 
+         success = ms.commitTransaction();
+       } catch (MetaException|NoSuchObjectException e) {
+         ex = e;
+         throw e;
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         }
+ 
+         if ((null != oldDB) && (!listeners.isEmpty())) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+               EventType.ALTER_DATABASE,
+               new AlterDatabaseEvent(oldDB, newDB, success, this),
+               null,
+               transactionalListenersResponses, ms);
+         }
+         endFunction("alter_database", success, ex);
+       }
+     }
+ 
+     private void drop_database_core(RawStore ms, String catName,
+         final String name, final boolean deleteData, final boolean cascade)
+         throws NoSuchObjectException, InvalidOperationException, MetaException,
+         IOException, InvalidObjectException, InvalidInputException {
+       boolean success = false;
+       Database db = null;
+       List<Path> tablePaths = new ArrayList<>();
+       List<Path> partitionPaths = new ArrayList<>();
+       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+       if (name == null) {
+         throw new MetaException("Database name cannot be null.");
+       }
+       try {
+         ms.openTransaction();
+         db = ms.getDatabase(catName, name);
+ 
+         if (!isInTest && ReplChangeManager.isSourceOfReplication(db)) {
+           throw new InvalidOperationException("can not drop a database which is a source of replication");
+         }
+ 
+         firePreEvent(new PreDropDatabaseEvent(db, this));
+         String catPrependedName = MetaStoreUtils.prependCatalogToDbName(catName, name, conf);
+ 
+         Set<String> uniqueTableNames = new HashSet<>(get_all_tables(catPrependedName));
+         List<String> allFunctions = get_functions(catPrependedName, "*");
+ 
+         if (!cascade) {
+           if (!uniqueTableNames.isEmpty()) {
+             throw new InvalidOperationException(
+                 "Database " + db.getName() + " is not empty. One or more tables exist.");
+           }
+           if (!allFunctions.isEmpty()) {
+             throw new InvalidOperationException(
+                 "Database " + db.getName() + " is not empty. One or more functions exist.");
+           }
+         }
+         Path path = new Path(db.getLocationUri()).getParent();
+         if (!wh.isWritable(path)) {
+           throw new MetaException("Database not dropped since " +
+               path + " is not writable by " +
+               SecurityUtils.getUser());
+         }
+ 
+         Path databasePath = wh.getDnsPath(wh.getDatabasePath(db));
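+         // Table and partition locations outside this database directory are collected below and deleted individually.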
+ 
+         // drop any functions before dropping db
+         for (String funcName : allFunctions) {
+           drop_function(catPrependedName, funcName);
+         }
+ 
+         final int tableBatchSize = MetastoreConf.getIntVar(conf,
+             ConfVars.BATCH_RETRIEVE_MAX);
+ 
+         // First pass drops the materialized views, since they may depend on the tables dropped next
+         List<String> materializedViewNames = get_tables_by_type(name, ".*", TableType.MATERIALIZED_VIEW.toString());
+         int startIndex = 0;
+         // retrieve the tables from the metastore in batches to alleviate memory constraints
+         while (startIndex < materializedViewNames.size()) {
+           int endIndex = Math.min(startIndex + tableBatchSize, materializedViewNames.size());
+ 
+           List<Table> materializedViews;
+           try {
+             materializedViews = ms.getTableObjectsByName(catName, name, materializedViewNames.subList(startIndex, endIndex));
+           } catch (UnknownDBException e) {
+             throw new MetaException(e.getMessage());
+           }
+ 
+           if (materializedViews != null && !materializedViews.isEmpty()) {
+             for (Table materializedView : materializedViews) {
+               if (materializedView.getSd().getLocation() != null) {
+                 Path materializedViewPath = wh.getDnsPath(new Path(materializedView.getSd().getLocation()));
+                 if (!wh.isWritable(materializedViewPath.getParent())) {
+                   throw new MetaException("Database metadata not deleted since table: " +
+                       materializedView.getTableName() + " has a parent location " + materializedViewPath.getParent() +
+                       " which is not writable by " + SecurityUtils.getUser());
+                 }
+ 
+                 if (!FileUtils.isSubdirectory(databasePath.toString(),
+                     materializedViewPath.toString())) {
+                   tablePaths.add(materializedViewPath);
+                 }
+               }
+               // Drop the materialized view but not its data
+               drop_table(name, materializedView.getTableName(), false);
+               // Remove from all tables
+               uniqueTableNames.remove(materializedView.getTableName());
+             }
+           }
+           startIndex = endIndex;
+         }
+ 
+         // drop tables before dropping db
+         List<String> allTables = new ArrayList<>(uniqueTableNames);
+         startIndex = 0;
+         // retrieve the tables from the metastore in batches to alleviate memory constraints
+         while (startIndex < allTables.size()) {
+           int endIndex = Math.min(startIndex + tableBatchSize, allTables.size());
+ 
+           List<Table> tables;
+           try {
+             tables = ms.getTableObjectsByName(catName, name, allTables.subList(startIndex, endIndex));
+           } catch (UnknownDBException e) {
+             throw new MetaException(e.getMessage());
+           }
+ 
+           if (tables != null && !tables.isEmpty()) {
+             for (Table table : tables) {
+               // If the table's data should be deleted and its location may lie outside
+               // the database directory, add its location to the list of paths to delete
+               Path tablePath = null;
+               boolean tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(table, deleteData);
+               if (table.getSd().getLocation() != null && tableDataShouldBeDeleted) {
+                 tablePath = wh.getDnsPath(new Path(table.getSd().getLocation()));
+                 if (!wh.isWritable(tablePath.getParent())) {
+                   throw new MetaException("Database metadata not deleted since table: " +
+                       table.getTableName() + " has a parent location " + tablePath.getParent() +
+                       " which is not writable by " + SecurityUtils.getUser());
+                 }
+ 
+                 if (!FileUtils.isSubdirectory(databasePath.toString(), tablePath.toString())) {
+                   tablePaths.add(tablePath);
+                 }
+               }
+ 
+               // For each partition in each table, drop the partitions and collect the
+               // partitions' locations which might need to be deleted. Accumulate across
+               // tables so earlier tables' partition paths are not overwritten
+               partitionPaths.addAll(dropPartitionsAndGetLocations(ms, catName, name,
+                   table.getTableName(), tablePath, tableDataShouldBeDeleted));
+ 
+               // Drop the table but not its data
+               drop_table(MetaStoreUtils.prependCatalogToDbName(table.getCatName(), table.getDbName(), conf),
+                   table.getTableName(), false);
+             }
+           }
+
+           // Advance the window even when a batch comes back empty, so the loop always terminates
+           startIndex = endIndex;
+         }
+ 
+         if (ms.dropDatabase(catName, name)) {
+           if (!transactionalListeners.isEmpty()) {
+             transactionalListenerResponses =
+                 MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                       EventType.DROP_DATABASE,
+                                                       new DropDatabaseEvent(db, true, this));
+           }
+ 
+           success = ms.commitTransaction();
+         }
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         } else if (deleteData) {
+           // Delete the data in the partitions which have other locations
+           deletePartitionData(partitionPaths, false, db);
+           // Delete the data in the tables which have other locations
+           for (Path tablePath : tablePaths) {
+             deleteTableData(tablePath, false, db);
+           }
+           // Delete the data in the database
+           try {
+             wh.deleteDir(new Path(db.getLocationUri()), true, db);
+           } catch (Exception e) {
+             LOG.error("Failed to delete database directory: " + db.getLocationUri(), e);
+           }
+           // Failing to delete the data here is tolerable; the metadata drop has already committed
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners,
+                                                 EventType.DROP_DATABASE,
+                                                 new DropDatabaseEvent(db, success, this),
+                                                 null,
+                                                 transactionalListenerResponses, ms);
+         }
+       }
+     }
+ 
+     @Override
+     public void drop_database(final String dbName, final boolean deleteData, final boolean cascade)
+         throws NoSuchObjectException, InvalidOperationException, MetaException {
+       startFunction("drop_database", ": " + dbName);
+       String[] parsedDbName = parseDbName(dbName, conf);
+       if (DEFAULT_CATALOG_NAME.equalsIgnoreCase(parsedDbName[CAT_NAME]) &&
+           DEFAULT_DATABASE_NAME.equalsIgnoreCase(parsedDbName[DB_NAME])) {
+         endFunction("drop_database", false, null);
+         throw new MetaException("Can not drop " + DEFAULT_DATABASE_NAME + " database in catalog "
+             + DEFAULT_CATALOG_NAME);
+       }
+ 
+       boolean success = false;
+       Exception ex = null;
+       try {
+         drop_database_core(getMS(), parsedDbName[CAT_NAME], parsedDbName[DB_NAME], deleteData,
+             cascade);
+         success = true;
+       } catch (NoSuchObjectException|InvalidOperationException|MetaException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("drop_database", success, ex);
+       }
+     }
+ 
+ 
+     @Override
+     public List<String> get_databases(final String pattern) throws MetaException {
+       startFunction("get_databases", ": " + pattern);
+ 
+       String[] parsedDbName = parseDbName(pattern, conf);
+       List<String> ret = null;
+       Exception ex = null;
+       try {
+         if (parsedDbName[DB_NAME] == null) {
+           ret = getMS().getAllDatabases(parsedDbName[CAT_NAME]);
+         } else {
+           ret = getMS().getDatabases(parsedDbName[CAT_NAME], parsedDbName[DB_NAME]);
+         }
+       } catch (MetaException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("get_databases", ret != null, ex);
+       }
+       return ret;
+     }
+ 
+     @Override
+     public List<String> get_all_databases() throws MetaException {
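+       // Passing nulls produces a catalog-qualified name that resolves to every database in the default catalog.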
+       return get_databases(MetaStoreUtils.prependCatalogToDbName(null, null, conf));
+     }
+ 
+     private void create_type_core(final RawStore ms, final Type type)
+         throws AlreadyExistsException, MetaException, InvalidObjectException {
+       if (!MetaStoreUtils.validateName(type.getName(), null)) {
+         throw new InvalidObjectException("Invalid type name");
+       }
+ 
+       boolean success = false;
+       try {
+         ms.openTransaction();
+         if (is_type_exists(ms, type.getName())) {
+           throw new AlreadyExistsException("Type " + type.getName() + " already exists");
+         }
+         ms.createType(type);
+         success = ms.commitTransaction();
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+         }
+       }
+     }
+ 
+     @Override
+     public boolean create_type(final Type type) throws AlreadyExistsException,
+         MetaException, InvalidObjectException {
+       startFunction("create_type", ": " + type.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         create_type_core(getMS(), type);
+         success = true;
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_type", success, ex);
+       }
+ 
+       return success;
+     }
+ 
+     @Override
+     public Type get_type(final String name) throws MetaException, NoSuchObjectException {
+       startFunction("get_type", ": " + name);
+ 
+       Type ret = null;
+       Exception ex = null;
+       try {
+         ret = getMS().getType(name);
+         if (null == ret) {
+           throw new NoSuchObjectException("Type \"" + name + "\" not found.");
+         }
+       } catch (Exception e) {
+         ex = e;
+         throwMetaException(e);
+       } finally {
+         endFunction("get_type", ret != null, ex);
+       }
+       return ret;
+     }
+ 
+     private boolean is_type_exists(RawStore ms, String typeName)
+         throws MetaException {
+       return (ms.getType(typeName) != null);
+     }
+ 
+     @Override
+     public boolean drop_type(final String name) throws MetaException, NoSuchObjectException {
+       startFunction("drop_type", ": " + name);
+ 
+       boolean success = false;
+       Exception ex = null;
+       try {
+         // TODO:pc validate that there are no types that refer to this
+         success = getMS().dropType(name);
+       } catch (Exception e) {
+         ex = e;
+         throwMetaException(e);
+       } finally {
+         endFunction("drop_type", success, ex);
+       }
+       return success;
+     }
+ 
+     @Override
+     public Map<String, Type> get_type_all(String name) throws MetaException {
+       startFunction("get_type_all", ": " + name);
+       endFunction("get_type_all", false, null);
+       throw new MetaException("Not yet implemented");
+     }
+ 
+     private void create_table_core(final RawStore ms, final Table tbl,
+         final EnvironmentContext envContext)
+             throws AlreadyExistsException, MetaException,
+             InvalidObjectException, NoSuchObjectException {
+       create_table_core(ms, tbl, envContext, null, null, null, null, null, null);
+     }
+ 
+     private void create_table_core(final RawStore ms, final Table tbl,
+         final EnvironmentContext envContext, List<SQLPrimaryKey> primaryKeys,
+         List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
+         List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints,
+         List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, MetaException,
+         InvalidObjectException, NoSuchObjectException {
+       // To preserve backward compatibility throw MetaException in case of null database
+       if (tbl.getDbName() == null) {
+         throw new MetaException("Null database name is not allowed");
+       }
+ 
+       if (!MetaStoreUtils.validateName(tbl.getTableName(), conf)) {
+         throw new InvalidObjectException(tbl.getTableName()
+             + " is not a valid object name");
+       }
+       String validate = MetaStoreUtils.validateTblColumns(tbl.getSd().getCols());
+       if (validate != null) {
+         throw new InvalidObjectException("Invalid column " + validate);
+       }
+       if (tbl.getPartitionKeys() != null) {
+         validate = MetaStoreUtils.validateTblColumns(tbl.getPartitionKeys());
+         if (validate != null) {
+           throw new InvalidObjectException("Invalid partition column " + validate);
+         }
+       }
+       SkewedInfo skew = tbl.getSd().getSkewedInfo();
+       if (skew != null) {
+         validate = MetaStoreUtils.validateSkewedColNames(skew.getSkewedColNames());
+         if (validate != null) {
+           throw new InvalidObjectException("Invalid skew column " + validate);
+         }
+         validate = MetaStoreUtils.validateSkewedColNamesSubsetCol(
+             skew.getSkewedColNames(), tbl.getSd().getCols());
+         if (validate != null) {
+           throw new InvalidObjectException("Invalid skew column " + validate);
+         }
+       }
+ 
+       Map<String, String> transactionalListenerResponses = Collections.emptyMap();
+       Path tblPath = null;
+       boolean success = false, madeDir = false;
+       Database db = null;
+       try {
+         if (!tbl.isSetCatName()) {
+           tbl.setCatName(getDefaultCatalog(conf));
+         }
+         firePreEvent(new PreCreateTableEvent(tbl, this));
+ 
+         ms.openTransaction();
+ 
+         db = ms.getDatabase(tbl.getCatName(), tbl.getDbName());
+ 
+         // get_table checks whether the database exists; that check should be moved here
+         if (is_table_exists(ms, tbl.getCatName(), tbl.getDbName(), tbl.getTableName())) {
+           throw new AlreadyExistsException("Table " + getCatalogQualifiedTableName(tbl)
+               + " already exists");
+         }
+ 
+         if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) {
+           if (tbl.getSd().getLocation() == null
+               || tbl.getSd().getLocation().isEmpty()) {
+             tblPath = wh.getDefaultTablePath(db, tbl);
+           } else {
+             if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) {
+               LOG.warn("Location: " + tbl.getSd().getLocation()
+                   + " specified for non-external table:" + tbl.getTableName());
+             }
+             tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation()));
+           }
+           tbl.getSd().setLocation(tblPath.toString());
+         }
+ 
+         if (tblPath != null) {
+           if (!wh.isDir(tblPath)) {
+             if (!wh.mkdirs(tblPath)) {
+               throw new MetaException(tblPath
+                   + " is not a directory or unable to create one");
+             }
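+             // Remember that we created the directory so a failed transaction can clean it up.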
+             madeDir = true;
+           }
+         }
+         if (MetastoreConf.getBoolVar(conf, ConfVars.STATS_AUTO_GATHER) &&
+             !MetaStoreUtils.isView(tbl)) {
+           MetaStoreUtils.updateTableStatsSlow(db, tbl, wh, madeDir, false, envContext);
+         }
+ 
+         // set create time
+         long time = System.currentTimeMillis() / 1000;
+         tbl.setCreateTime((int) time);
+         if (tbl.getParameters() == null ||
+             tbl.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) {
+           tbl.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
+         }
+ 
+         if (primaryKeys == null && foreignKeys == null
+             && uniqueConstraints == null && notNullConstraints == null
+             && defaultConstraints == null && checkConstraints == null) {
+           ms.createTable(tbl);
+         } else {
+           // Check that constraints have catalog name properly set first
+           if (primaryKeys != null && !primaryKeys.isEmpty() && !primaryKeys.get(0).isSetCatName()) {
+             for (SQLPrimaryKey pkcol : primaryKeys) pkcol.setCatName(tbl.getCatName());
+           }
+           if (foreignKeys != null && !foreignKeys.isEmpty() && !foreignKeys.get(0).isSetCatName()) {
+             for (SQLForeignKey fkcol : foreignKeys) fkcol.setCatName(tbl.getCatName());
+           }
+           if (uniqueConstraints != null && !uniqueConstraints.isEmpty() && !uniqueConstraints.get(0).isSetCatName()) {
+             for (SQLUniqueConstraint uccol : uniqueConstraints) uccol.setCatName(tbl.getCatName());
+           }
+           if (notNullConstraints != null && !notNullConstraints.isEmpty() && !notNullConstraints.get(0).isSetCatName()) {
+             for (SQLNotNullConstraint nncol : notNullConstraints) nncol.setCatName(tbl.getCatName());
+           }
+           if (defaultConstraints != null && !defaultConstraints.isEmpty() && !defaultConstraints.get(0).isSetCatName()) {
+             for (SQLDefaultConstraint dccol : defaultConstraints) dccol.setCatName(tbl.getCatName());
+           }
+           if (checkConstraints != null && !checkConstraints.isEmpty() && !checkConstraints.get(0).isSetCatName()) {
+             for (SQLCheckConstraint cccol : checkConstraints) cccol.setCatName(tbl.getCatName());
+           }
+           // Set constraint name if null before sending to listener
+           List<String> constraintNames = ms.createTableWithConstraints(tbl, primaryKeys, foreignKeys,
+               uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
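+           // The index arithmetic below assumes createTableWithConstraints returns generated
+           // names ordered as: primary keys, foreign keys, unique, not-null, default, check.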
+           int primaryKeySize = 0;
+           if (primaryKeys != null) {
+             primaryKeySize = primaryKeys.size();
+             for (int i = 0; i < primaryKeys.size(); i++) {
+               if (primaryKeys.get(i).getPk_name() == null) {
+                 primaryKeys.get(i).setPk_name(constraintNames.get(i));
+               }
+               if (!primaryKeys.get(i).isSetCatName()) primaryKeys.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int foreignKeySize = 0;
+           if (foreignKeys != null) {
+             foreignKeySize = foreignKeys.size();
+             for (int i = 0; i < foreignKeySize; i++) {
+               if (foreignKeys.get(i).getFk_name() == null) {
+                 foreignKeys.get(i).setFk_name(constraintNames.get(primaryKeySize + i));
+               }
+               if (!foreignKeys.get(i).isSetCatName()) foreignKeys.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int uniqueConstraintSize = 0;
+           if (uniqueConstraints != null) {
+             uniqueConstraintSize = uniqueConstraints.size();
+             for (int i = 0; i < uniqueConstraintSize; i++) {
+               if (uniqueConstraints.get(i).getUk_name() == null) {
+                 uniqueConstraints.get(i).setUk_name(constraintNames.get(primaryKeySize + foreignKeySize + i));
+               }
+               if (!uniqueConstraints.get(i).isSetCatName()) uniqueConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int notNullConstraintSize = 0;
+           if (notNullConstraints != null) {
+             notNullConstraintSize = notNullConstraints.size();
+             for (int i = 0; i < notNullConstraints.size(); i++) {
+               if (notNullConstraints.get(i).getNn_name() == null) {
+                 notNullConstraints.get(i).setNn_name(constraintNames.get(primaryKeySize + foreignKeySize + uniqueConstraintSize + i));
+               }
+               if (!notNullConstraints.get(i).isSetCatName()) notNullConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           int defaultConstraintSize = 0;
+           if (defaultConstraints != null) {
+             defaultConstraintSize = defaultConstraints.size();
+             for (int i = 0; i < defaultConstraints.size(); i++) {
+               if (defaultConstraints.get(i).getDc_name() == null) {
+                 defaultConstraints.get(i).setDc_name(constraintNames.get(primaryKeySize + foreignKeySize
+                     + uniqueConstraintSize + notNullConstraintSize + i));
+               }
+               if (!defaultConstraints.get(i).isSetCatName()) defaultConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+           if (checkConstraints != null) {
+             for (int i = 0; i < checkConstraints.size(); i++) {
+               if (checkConstraints.get(i).getDc_name() == null) {
+                 checkConstraints.get(i).setDc_name(constraintNames.get(primaryKeySize + foreignKeySize
+                     + uniqueConstraintSize + notNullConstraintSize + defaultConstraintSize + i));
+               }
+               if (!checkConstraints.get(i).isSetCatName()) checkConstraints.get(i).setCatName(tbl.getCatName());
+             }
+           }
+         }
+ 
+         if (!transactionalListeners.isEmpty()) {
+           transactionalListenerResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+               EventType.CREATE_TABLE, new CreateTableEvent(tbl, true, this), envContext);
+           if (primaryKeys != null && !primaryKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_PRIMARYKEY,
+                 new AddPrimaryKeyEvent(primaryKeys, true, this), envContext);
+           }
+           if (foreignKeys != null && !foreignKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_FOREIGNKEY,
+                 new AddForeignKeyEvent(foreignKeys, true, this), envContext);
+           }
+           if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_UNIQUECONSTRAINT,
+                 new AddUniqueConstraintEvent(uniqueConstraints, true, this), envContext);
+           }
+           if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(transactionalListeners, EventType.ADD_NOTNULLCONSTRAINT,
+                 new AddNotNullConstraintEvent(notNullConstraints, true, this), envContext);
+           }
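+           // Note: default and check constraints currently get no corresponding listener notifications.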
+         }
+ 
+         success = ms.commitTransaction();
+       } finally {
+         if (!success) {
+           ms.rollbackTransaction();
+           if (madeDir) {
+             wh.deleteDir(tblPath, true, db);
+           }
+         }
+ 
+         if (!listeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(listeners, EventType.CREATE_TABLE,
+               new CreateTableEvent(tbl, success, this), envContext, transactionalListenerResponses, ms);
+           if (primaryKeys != null && !primaryKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_PRIMARYKEY,
+                 new AddPrimaryKeyEvent(primaryKeys, success, this), envContext);
+           }
+           if (foreignKeys != null && !foreignKeys.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_FOREIGNKEY,
+                 new AddForeignKeyEvent(foreignKeys, success, this), envContext);
+           }
+           if (uniqueConstraints != null && !uniqueConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_UNIQUECONSTRAINT,
+                 new AddUniqueConstraintEvent(uniqueConstraints, success, this), envContext);
+           }
+           if (notNullConstraints != null && !notNullConstraints.isEmpty()) {
+             MetaStoreListenerNotifier.notifyEvent(listeners, EventType.ADD_NOTNULLCONSTRAINT,
+                 new AddNotNullConstraintEvent(notNullConstraints, success, this), envContext);
+           }
+         }
+       }
+     }
+ 
+     @Override
+     public void create_table(final Table tbl) throws AlreadyExistsException,
+         MetaException, InvalidObjectException {
+       create_table_with_environment_context(tbl, null);
+     }
+ 
+     @Override
+     public void create_table_with_environment_context(final Table tbl,
+         final EnvironmentContext envContext)
+         throws AlreadyExistsException, MetaException, InvalidObjectException {
+       startFunction("create_table", ": " + tbl.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         create_table_core(getMS(), tbl, envContext);
+         success = true;
+       } catch (NoSuchObjectException e) {
+         LOG.warn("create_table_with_environment_context got an exception", e);
+         ex = e;
+         throw new InvalidObjectException(e.getMessage());
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_table", success, ex, tbl.getTableName());
+       }
+     }
+ 
+     @Override
+     public void create_table_with_constraints(final Table tbl,
+         final List<SQLPrimaryKey> primaryKeys, final List<SQLForeignKey> foreignKeys,
+         List<SQLUniqueConstraint> uniqueConstraints,
+         List<SQLNotNullConstraint> notNullConstraints,
+         List<SQLDefaultConstraint> defaultConstraints,
+         List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, MetaException, InvalidObjectException {
+       startFunction("create_table", ": " + tbl.toString());
+       boolean success = false;
+       Exception ex = null;
+       try {
+         create_table_core(getMS(), tbl, null, primaryKeys, foreignKeys,
+             uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+         success = true;
+       } catch (NoSuchObjectException e) {
+         ex = e;
+         throw new InvalidObjectException(e.getMessage());
+       } catch (MetaException | InvalidObjectException | AlreadyExistsException e) {
+         ex = e;
+         throw e;
+       } catch (Exception e) {
+         ex = e;
+         throw newMetaException(e);
+       } finally {
+         endFunction("create_table", success, ex, tbl.getTableName());
+       }
+     }
+ 
+     @Override
+     public void drop_constraint(DropConstraintRequest req)
+         throws MetaException, InvalidObjectException {
+       String catName = req.isSetCatName() ? req.getCatName() : getDefaultCatalog(conf);
+       String dbName = req.getDbname();
+       String tableName = req.getTablename();
+       String constraintName = req.getConstraintname();
+       startFunction("drop_constraint", ": " + constraintName);
+       boolean success = false;
+       Exception ex = null;
+       RawStore ms = getMS();
+       try {
+         ms.openTransaction();
+         ms.dropConstraint(catName, dbName, tableName, constraintName);
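+         // Transactional listeners are notified inside the open transaction, so the event commits or rolls back with it.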
+         if (!transactionalListeners.isEmpty()) {
+           DropConstraintEvent dropConstraintEvent = new DropConstraintEvent(catName, dbName,
+               tableName, constraintName, true, this);
+           for (MetaStoreEventListener transactionalListener : transactionalListeners) {
+             transactionalListener.onDropConstraint(dropConstraintEvent);
+           }
+         }
+         success = ms.commitTransaction();
+       } catch (NoSuchObjectException e) {
+         ex = e;
+         throw new InvalidObjectException(e.getMessage());
+       } catch (MetaException e) {


<TRUNCATED>

[21/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java
new file mode 100644
index 0000000..71ea2c6
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatRequest.java
@@ -0,0 +1,489 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class HeartbeatRequest implements org.apache.thrift.TBase<HeartbeatRequest, HeartbeatRequest._Fields>, java.io.Serializable, Cloneable, Comparable<HeartbeatRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatRequest");
+
+  private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new HeartbeatRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new HeartbeatRequestTupleSchemeFactory());
+  }
+
+  private long lockid; // optional
+  private long txnid; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    LOCKID((short)1, "lockid"),
+    TXNID((short)2, "txnid");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // LOCKID
+          return LOCKID;
+        case 2: // TXNID
+          return TXNID;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __LOCKID_ISSET_ID = 0;
+  private static final int __TXNID_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
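+  // Thrift tracks whether primitive optional fields were assigned via this bitfield, since a long cannot be null.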
+  private static final _Fields optionals[] = {_Fields.LOCKID,_Fields.TXNID};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.LOCKID, new org.apache.thrift.meta_data.FieldMetaData("lockid", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.TXNID, new org.apache.thrift.meta_data.FieldMetaData("txnid", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HeartbeatRequest.class, metaDataMap);
+  }
+
+  public HeartbeatRequest() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public HeartbeatRequest(HeartbeatRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.lockid = other.lockid;
+    this.txnid = other.txnid;
+  }
+
+  public HeartbeatRequest deepCopy() {
+    return new HeartbeatRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setLockidIsSet(false);
+    this.lockid = 0;
+    setTxnidIsSet(false);
+    this.txnid = 0;
+  }
+
+  public long getLockid() {
+    return this.lockid;
+  }
+
+  public void setLockid(long lockid) {
+    this.lockid = lockid;
+    setLockidIsSet(true);
+  }
+
+  public void unsetLockid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOCKID_ISSET_ID);
+  }
+
+  /** Returns true if field lockid is set (has been assigned a value) and false otherwise */
+  public boolean isSetLockid() {
+    return EncodingUtils.testBit(__isset_bitfield, __LOCKID_ISSET_ID);
+  }
+
+  public void setLockidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOCKID_ISSET_ID, value);
+  }
+
+  public long getTxnid() {
+    return this.txnid;
+  }
+
+  public void setTxnid(long txnid) {
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+  }
+
+  public void unsetTxnid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  /** Returns true if field txnid is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnid() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  public void setTxnidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case LOCKID:
+      if (value == null) {
+        unsetLockid();
+      } else {
+        setLockid((Long)value);
+      }
+      break;
+
+    case TXNID:
+      if (value == null) {
+        unsetTxnid();
+      } else {
+        setTxnid((Long)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case LOCKID:
+      return getLockid();
+
+    case TXNID:
+      return getTxnid();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case LOCKID:
+      return isSetLockid();
+    case TXNID:
+      return isSetTxnid();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof HeartbeatRequest)
+      return this.equals((HeartbeatRequest)that);
+    return false;
+  }
+
+  public boolean equals(HeartbeatRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_lockid = true && this.isSetLockid();
+    boolean that_present_lockid = true && that.isSetLockid();
+    if (this_present_lockid || that_present_lockid) {
+      if (!(this_present_lockid && that_present_lockid))
+        return false;
+      if (this.lockid != that.lockid)
+        return false;
+    }
+
+    boolean this_present_txnid = true && this.isSetTxnid();
+    boolean that_present_txnid = true && that.isSetTxnid();
+    if (this_present_txnid || that_present_txnid) {
+      if (!(this_present_txnid && that_present_txnid))
+        return false;
+      if (this.txnid != that.txnid)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_lockid = true && (isSetLockid());
+    list.add(present_lockid);
+    if (present_lockid)
+      list.add(lockid);
+
+    boolean present_txnid = true && (isSetTxnid());
+    list.add(present_txnid);
+    if (present_txnid)
+      list.add(txnid);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(HeartbeatRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetLockid()).compareTo(other.isSetLockid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLockid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lockid, other.lockid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTxnid()).compareTo(other.isSetTxnid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnid, other.txnid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("HeartbeatRequest(");
+    boolean first = true;
+
+    if (isSetLockid()) {
+      sb.append("lockid:");
+      sb.append(this.lockid);
+      first = false;
+    }
+    if (isSetTxnid()) {
+      if (!first) sb.append(", ");
+      sb.append("txnid:");
+      sb.append(this.txnid);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class HeartbeatRequestStandardSchemeFactory implements SchemeFactory {
+    public HeartbeatRequestStandardScheme getScheme() {
+      return new HeartbeatRequestStandardScheme();
+    }
+  }
+
+  private static class HeartbeatRequestStandardScheme extends StandardScheme<HeartbeatRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // LOCKID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.lockid = iprot.readI64();
+              struct.setLockidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TXNID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txnid = iprot.readI64();
+              struct.setTxnidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.isSetLockid()) {
+        oprot.writeFieldBegin(LOCKID_FIELD_DESC);
+        oprot.writeI64(struct.lockid);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetTxnid()) {
+        oprot.writeFieldBegin(TXNID_FIELD_DESC);
+        oprot.writeI64(struct.txnid);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class HeartbeatRequestTupleSchemeFactory implements SchemeFactory {
+    public HeartbeatRequestTupleScheme getScheme() {
+      return new HeartbeatRequestTupleScheme();
+    }
+  }
+
+  private static class HeartbeatRequestTupleScheme extends TupleScheme<HeartbeatRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetLockid()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTxnid()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
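+      // The tuple protocol writes a presence bitset first, then only the values of the fields that are set.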
+      if (struct.isSetLockid()) {
+        oprot.writeI64(struct.lockid);
+      }
+      if (struct.isSetTxnid()) {
+        oprot.writeI64(struct.txnid);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.lockid = iprot.readI64();
+        struct.setLockidIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.txnid = iprot.readI64();
+        struct.setTxnidIsSet(true);
+      }
+    }
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java
new file mode 100644
index 0000000..586449c
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeRequest.java
@@ -0,0 +1,482 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class HeartbeatTxnRangeRequest implements org.apache.thrift.TBase<HeartbeatTxnRangeRequest, HeartbeatTxnRangeRequest._Fields>, java.io.Serializable, Cloneable, Comparable<HeartbeatTxnRangeRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatTxnRangeRequest");
+
+  private static final org.apache.thrift.protocol.TField MIN_FIELD_DESC = new org.apache.thrift.protocol.TField("min", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField MAX_FIELD_DESC = new org.apache.thrift.protocol.TField("max", org.apache.thrift.protocol.TType.I64, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new HeartbeatTxnRangeRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new HeartbeatTxnRangeRequestTupleSchemeFactory());
+  }
+
+  private long min; // required
+  private long max; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MIN((short)1, "min"),
+    MAX((short)2, "max");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MIN
+          return MIN;
+        case 2: // MAX
+          return MAX;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __MIN_ISSET_ID = 0;
+  private static final int __MAX_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MIN, new org.apache.thrift.meta_data.FieldMetaData("min", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.MAX, new org.apache.thrift.meta_data.FieldMetaData("max", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HeartbeatTxnRangeRequest.class, metaDataMap);
+  }
+
+  public HeartbeatTxnRangeRequest() {
+  }
+
+  public HeartbeatTxnRangeRequest(
+    long min,
+    long max)
+  {
+    this();
+    this.min = min;
+    setMinIsSet(true);
+    this.max = max;
+    setMaxIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public HeartbeatTxnRangeRequest(HeartbeatTxnRangeRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.min = other.min;
+    this.max = other.max;
+  }
+
+  public HeartbeatTxnRangeRequest deepCopy() {
+    return new HeartbeatTxnRangeRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setMinIsSet(false);
+    this.min = 0;
+    setMaxIsSet(false);
+    this.max = 0;
+  }
+
+  public long getMin() {
+    return this.min;
+  }
+
+  public void setMin(long min) {
+    this.min = min;
+    setMinIsSet(true);
+  }
+
+  public void unsetMin() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MIN_ISSET_ID);
+  }
+
+  /** Returns true if field min is set (has been assigned a value) and false otherwise */
+  public boolean isSetMin() {
+    return EncodingUtils.testBit(__isset_bitfield, __MIN_ISSET_ID);
+  }
+
+  public void setMinIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MIN_ISSET_ID, value);
+  }
+
+  public long getMax() {
+    return this.max;
+  }
+
+  public void setMax(long max) {
+    this.max = max;
+    setMaxIsSet(true);
+  }
+
+  public void unsetMax() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_ISSET_ID);
+  }
+
+  /** Returns true if field max is set (has been assigned a value) and false otherwise */
+  public boolean isSetMax() {
+    return EncodingUtils.testBit(__isset_bitfield, __MAX_ISSET_ID);
+  }
+
+  public void setMaxIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MIN:
+      if (value == null) {
+        unsetMin();
+      } else {
+        setMin((Long)value);
+      }
+      break;
+
+    case MAX:
+      if (value == null) {
+        unsetMax();
+      } else {
+        setMax((Long)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MIN:
+      return getMin();
+
+    case MAX:
+      return getMax();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MIN:
+      return isSetMin();
+    case MAX:
+      return isSetMax();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof HeartbeatTxnRangeRequest)
+      return this.equals((HeartbeatTxnRangeRequest)that);
+    return false;
+  }
+
+  public boolean equals(HeartbeatTxnRangeRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_min = true;
+    boolean that_present_min = true;
+    if (this_present_min || that_present_min) {
+      if (!(this_present_min && that_present_min))
+        return false;
+      if (this.min != that.min)
+        return false;
+    }
+
+    boolean this_present_max = true;
+    boolean that_present_max = true;
+    if (this_present_max || that_present_max) {
+      if (!(this_present_max && that_present_max))
+        return false;
+      if (this.max != that.max)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_min = true;
+    list.add(present_min);
+    if (present_min)
+      list.add(min);
+
+    boolean present_max = true;
+    list.add(present_max);
+    if (present_max)
+      list.add(max);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(HeartbeatTxnRangeRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMin()).compareTo(other.isSetMin());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMin()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.min, other.min);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMax()).compareTo(other.isSetMax());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMax()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max, other.max);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("HeartbeatTxnRangeRequest(");
+    boolean first = true;
+
+    sb.append("min:");
+    sb.append(this.min);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("max:");
+    sb.append(this.max);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetMin()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'min' is unset! Struct:" + toString());
+    }
+
+    if (!isSetMax()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'max' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class HeartbeatTxnRangeRequestStandardSchemeFactory implements SchemeFactory {
+    public HeartbeatTxnRangeRequestStandardScheme getScheme() {
+      return new HeartbeatTxnRangeRequestStandardScheme();
+    }
+  }
+
+  private static class HeartbeatTxnRangeRequestStandardScheme extends StandardScheme<HeartbeatTxnRangeRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MIN
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.min = iprot.readI64();
+              struct.setMinIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // MAX
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.max = iprot.readI64();
+              struct.setMaxIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(MIN_FIELD_DESC);
+      oprot.writeI64(struct.min);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(MAX_FIELD_DESC);
+      oprot.writeI64(struct.max);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class HeartbeatTxnRangeRequestTupleSchemeFactory implements SchemeFactory {
+    public HeartbeatTxnRangeRequestTupleScheme getScheme() {
+      return new HeartbeatTxnRangeRequestTupleScheme();
+    }
+  }
+
+  private static class HeartbeatTxnRangeRequestTupleScheme extends TupleScheme<HeartbeatTxnRangeRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.min);
+      oprot.writeI64(struct.max);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.min = iprot.readI64();
+      struct.setMinIsSet(true);
+      struct.max = iprot.readI64();
+      struct.setMaxIsSet(true);
+    }
+  }
+
+}
+
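
A minimal client-side sketch (an editor's illustration, not part of the commit): it builds a HeartbeatTxnRangeRequest with the generated setters referenced above and round-trips it through TCompactProtocol via libthrift's TSerializer and TDeserializer helpers. The struct's class and method names come from the generated code; the surrounding scaffolding is assumed.

    import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class HeartbeatRequestSketch {
      public static void main(String[] args) throws Exception {
        HeartbeatTxnRangeRequest req = new HeartbeatTxnRangeRequest();
        req.setMin(100L);                       // heartbeat txns 100..110
        req.setMax(110L);
        req.validate();                         // both required fields are set

        // Round-trip with the same compact protocol the generated
        // writeObject/readObject methods use for Java serialization.
        byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(req);
        HeartbeatTxnRangeRequest copy = new HeartbeatTxnRangeRequest();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

        System.out.println(copy);               // HeartbeatTxnRangeRequest(min:100, max:110)
      }
    }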

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
new file mode 100644
index 0000000..a3dceab
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
@@ -0,0 +1,588 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class HeartbeatTxnRangeResponse implements org.apache.thrift.TBase<HeartbeatTxnRangeResponse, HeartbeatTxnRangeResponse._Fields>, java.io.Serializable, Cloneable, Comparable<HeartbeatTxnRangeResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HeartbeatTxnRangeResponse");
+
+  private static final org.apache.thrift.protocol.TField ABORTED_FIELD_DESC = new org.apache.thrift.protocol.TField("aborted", org.apache.thrift.protocol.TType.SET, (short)1);
+  private static final org.apache.thrift.protocol.TField NOSUCH_FIELD_DESC = new org.apache.thrift.protocol.TField("nosuch", org.apache.thrift.protocol.TType.SET, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new HeartbeatTxnRangeResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new HeartbeatTxnRangeResponseTupleSchemeFactory());
+  }
+
+  private Set<Long> aborted; // required
+  private Set<Long> nosuch; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    ABORTED((short)1, "aborted"),
+    NOSUCH((short)2, "nosuch");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // ABORTED
+          return ABORTED;
+        case 2: // NOSUCH
+          return NOSUCH;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.ABORTED, new org.apache.thrift.meta_data.FieldMetaData("aborted", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    tmpMap.put(_Fields.NOSUCH, new org.apache.thrift.meta_data.FieldMetaData("nosuch", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HeartbeatTxnRangeResponse.class, metaDataMap);
+  }
+
+  public HeartbeatTxnRangeResponse() {
+  }
+
+  public HeartbeatTxnRangeResponse(
+    Set<Long> aborted,
+    Set<Long> nosuch)
+  {
+    this();
+    this.aborted = aborted;
+    this.nosuch = nosuch;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public HeartbeatTxnRangeResponse(HeartbeatTxnRangeResponse other) {
+    if (other.isSetAborted()) {
+      Set<Long> __this__aborted = new HashSet<Long>(other.aborted);
+      this.aborted = __this__aborted;
+    }
+    if (other.isSetNosuch()) {
+      Set<Long> __this__nosuch = new HashSet<Long>(other.nosuch);
+      this.nosuch = __this__nosuch;
+    }
+  }
+
+  public HeartbeatTxnRangeResponse deepCopy() {
+    return new HeartbeatTxnRangeResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.aborted = null;
+    this.nosuch = null;
+  }
+
+  public int getAbortedSize() {
+    return (this.aborted == null) ? 0 : this.aborted.size();
+  }
+
+  public java.util.Iterator<Long> getAbortedIterator() {
+    return (this.aborted == null) ? null : this.aborted.iterator();
+  }
+
+  public void addToAborted(long elem) {
+    if (this.aborted == null) {
+      this.aborted = new HashSet<Long>();
+    }
+    this.aborted.add(elem);
+  }
+
+  public Set<Long> getAborted() {
+    return this.aborted;
+  }
+
+  public void setAborted(Set<Long> aborted) {
+    this.aborted = aborted;
+  }
+
+  public void unsetAborted() {
+    this.aborted = null;
+  }
+
+  /** Returns true if field aborted is set (has been assigned a value) and false otherwise */
+  public boolean isSetAborted() {
+    return this.aborted != null;
+  }
+
+  public void setAbortedIsSet(boolean value) {
+    if (!value) {
+      this.aborted = null;
+    }
+  }
+
+  public int getNosuchSize() {
+    return (this.nosuch == null) ? 0 : this.nosuch.size();
+  }
+
+  public java.util.Iterator<Long> getNosuchIterator() {
+    return (this.nosuch == null) ? null : this.nosuch.iterator();
+  }
+
+  public void addToNosuch(long elem) {
+    if (this.nosuch == null) {
+      this.nosuch = new HashSet<Long>();
+    }
+    this.nosuch.add(elem);
+  }
+
+  public Set<Long> getNosuch() {
+    return this.nosuch;
+  }
+
+  public void setNosuch(Set<Long> nosuch) {
+    this.nosuch = nosuch;
+  }
+
+  public void unsetNosuch() {
+    this.nosuch = null;
+  }
+
+  /** Returns true if field nosuch is set (has been assigned a value) and false otherwise */
+  public boolean isSetNosuch() {
+    return this.nosuch != null;
+  }
+
+  public void setNosuchIsSet(boolean value) {
+    if (!value) {
+      this.nosuch = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case ABORTED:
+      if (value == null) {
+        unsetAborted();
+      } else {
+        setAborted((Set<Long>)value);
+      }
+      break;
+
+    case NOSUCH:
+      if (value == null) {
+        unsetNosuch();
+      } else {
+        setNosuch((Set<Long>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case ABORTED:
+      return getAborted();
+
+    case NOSUCH:
+      return getNosuch();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case ABORTED:
+      return isSetAborted();
+    case NOSUCH:
+      return isSetNosuch();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof HeartbeatTxnRangeResponse)
+      return this.equals((HeartbeatTxnRangeResponse)that);
+    return false;
+  }
+
+  public boolean equals(HeartbeatTxnRangeResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_aborted = true && this.isSetAborted();
+    boolean that_present_aborted = true && that.isSetAborted();
+    if (this_present_aborted || that_present_aborted) {
+      if (!(this_present_aborted && that_present_aborted))
+        return false;
+      if (!this.aborted.equals(that.aborted))
+        return false;
+    }
+
+    boolean this_present_nosuch = true && this.isSetNosuch();
+    boolean that_present_nosuch = true && that.isSetNosuch();
+    if (this_present_nosuch || that_present_nosuch) {
+      if (!(this_present_nosuch && that_present_nosuch))
+        return false;
+      if (!this.nosuch.equals(that.nosuch))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_aborted = true && (isSetAborted());
+    list.add(present_aborted);
+    if (present_aborted)
+      list.add(aborted);
+
+    boolean present_nosuch = true && (isSetNosuch());
+    list.add(present_nosuch);
+    if (present_nosuch)
+      list.add(nosuch);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(HeartbeatTxnRangeResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetAborted()).compareTo(other.isSetAborted());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAborted()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.aborted, other.aborted);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNosuch()).compareTo(other.isSetNosuch());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNosuch()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.nosuch, other.nosuch);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("HeartbeatTxnRangeResponse(");
+    boolean first = true;
+
+    sb.append("aborted:");
+    if (this.aborted == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.aborted);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("nosuch:");
+    if (this.nosuch == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.nosuch);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetAborted()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'aborted' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNosuch()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'nosuch' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class HeartbeatTxnRangeResponseStandardSchemeFactory implements SchemeFactory {
+    public HeartbeatTxnRangeResponseStandardScheme getScheme() {
+      return new HeartbeatTxnRangeResponseStandardScheme();
+    }
+  }
+
+  private static class HeartbeatTxnRangeResponseStandardScheme extends StandardScheme<HeartbeatTxnRangeResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // ABORTED
+            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
+              {
+                org.apache.thrift.protocol.TSet _set674 = iprot.readSetBegin();
+                struct.aborted = new HashSet<Long>(2*_set674.size);
+                long _elem675;
+                for (int _i676 = 0; _i676 < _set674.size; ++_i676)
+                {
+                  _elem675 = iprot.readI64();
+                  struct.aborted.add(_elem675);
+                }
+                iprot.readSetEnd();
+              }
+              struct.setAbortedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // NOSUCH
+            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
+              {
+                org.apache.thrift.protocol.TSet _set677 = iprot.readSetBegin();
+                struct.nosuch = new HashSet<Long>(2*_set677.size);
+                long _elem678;
+                for (int _i679 = 0; _i679 < _set677.size; ++_i679)
+                {
+                  _elem678 = iprot.readI64();
+                  struct.nosuch.add(_elem678);
+                }
+                iprot.readSetEnd();
+              }
+              struct.setNosuchIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.aborted != null) {
+        oprot.writeFieldBegin(ABORTED_FIELD_DESC);
+        {
+          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size()));
+          for (long _iter680 : struct.aborted)
+          {
+            oprot.writeI64(_iter680);
+          }
+          oprot.writeSetEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.nosuch != null) {
+        oprot.writeFieldBegin(NOSUCH_FIELD_DESC);
+        {
+          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size()));
+          for (long _iter681 : struct.nosuch)
+          {
+            oprot.writeI64(_iter681);
+          }
+          oprot.writeSetEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class HeartbeatTxnRangeResponseTupleSchemeFactory implements SchemeFactory {
+    public HeartbeatTxnRangeResponseTupleScheme getScheme() {
+      return new HeartbeatTxnRangeResponseTupleScheme();
+    }
+  }
+
+  private static class HeartbeatTxnRangeResponseTupleScheme extends TupleScheme<HeartbeatTxnRangeResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.aborted.size());
+        for (long _iter682 : struct.aborted)
+        {
+          oprot.writeI64(_iter682);
+        }
+      }
+      {
+        oprot.writeI32(struct.nosuch.size());
+        for (long _iter683 : struct.nosuch)
+        {
+          oprot.writeI64(_iter683);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TSet _set684 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.aborted = new HashSet<Long>(2*_set684.size);
+        long _elem685;
+        for (int _i686 = 0; _i686 < _set684.size; ++_i686)
+        {
+          _elem685 = iprot.readI64();
+          struct.aborted.add(_elem685);
+        }
+      }
+      struct.setAbortedIsSet(true);
+      {
+        org.apache.thrift.protocol.TSet _set687 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.nosuch = new HashSet<Long>(2*_set687.size);
+        long _elem688;
+        for (int _i689 = 0; _i689 < _set687.size; ++_i689)
+        {
+          _elem688 = iprot.readI64();
+          struct.nosuch.add(_elem688);
+        }
+      }
+      struct.setNosuchIsSet(true);
+    }
+  }
+
+}
+
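
A similar sketch for the response side, using only methods generated above: addToAborted/addToNosuch lazily create the required sets, and validate() passes once both have been assigned.

    import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;

    public class HeartbeatResponseSketch {
      public static void main(String[] args) throws Exception {
        HeartbeatTxnRangeResponse resp = new HeartbeatTxnRangeResponse();
        resp.addToAborted(101L);   // txn 101 had already been aborted
        resp.addToNosuch(205L);    // txn 205 does not exist
        resp.validate();           // both required sets are now set

        // Iterate without exposing the underlying HashSet directly.
        java.util.Iterator<Long> it = resp.getAbortedIterator();
        while (it.hasNext()) {
          System.out.println("aborted txn: " + it.next());
        }
        System.out.println("unknown txns: " + resp.getNosuchSize());
      }
    }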

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java
new file mode 100644
index 0000000..8b2817d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectPrivilege.java
@@ -0,0 +1,833 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class HiveObjectPrivilege implements org.apache.thrift.TBase<HiveObjectPrivilege, HiveObjectPrivilege._Fields>, java.io.Serializable, Cloneable, Comparable<HiveObjectPrivilege> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveObjectPrivilege");
+
+  private static final org.apache.thrift.protocol.TField HIVE_OBJECT_FIELD_DESC = new org.apache.thrift.protocol.TField("hiveObject", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("principalName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("principalType", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField GRANT_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("grantInfo", org.apache.thrift.protocol.TType.STRUCT, (short)4);
+  private static final org.apache.thrift.protocol.TField AUTHORIZER_FIELD_DESC = new org.apache.thrift.protocol.TField("authorizer", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new HiveObjectPrivilegeStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new HiveObjectPrivilegeTupleSchemeFactory());
+  }
+
+  private HiveObjectRef hiveObject; // required
+  private String principalName; // required
+  private PrincipalType principalType; // required
+  private PrivilegeGrantInfo grantInfo; // required
+  private String authorizer; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    HIVE_OBJECT((short)1, "hiveObject"),
+    PRINCIPAL_NAME((short)2, "principalName"),
+    /**
+     * 
+     * @see PrincipalType
+     */
+    PRINCIPAL_TYPE((short)3, "principalType"),
+    GRANT_INFO((short)4, "grantInfo"),
+    AUTHORIZER((short)5, "authorizer");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // HIVE_OBJECT
+          return HIVE_OBJECT;
+        case 2: // PRINCIPAL_NAME
+          return PRINCIPAL_NAME;
+        case 3: // PRINCIPAL_TYPE
+          return PRINCIPAL_TYPE;
+        case 4: // GRANT_INFO
+          return GRANT_INFO;
+        case 5: // AUTHORIZER
+          return AUTHORIZER;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.HIVE_OBJECT, new org.apache.thrift.meta_data.FieldMetaData("hiveObject", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HiveObjectRef.class)));
+    tmpMap.put(_Fields.PRINCIPAL_NAME, new org.apache.thrift.meta_data.FieldMetaData("principalName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PRINCIPAL_TYPE, new org.apache.thrift.meta_data.FieldMetaData("principalType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+    tmpMap.put(_Fields.GRANT_INFO, new org.apache.thrift.meta_data.FieldMetaData("grantInfo", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrivilegeGrantInfo.class)));
+    tmpMap.put(_Fields.AUTHORIZER, new org.apache.thrift.meta_data.FieldMetaData("authorizer", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HiveObjectPrivilege.class, metaDataMap);
+  }
+
+  public HiveObjectPrivilege() {
+  }
+
+  public HiveObjectPrivilege(
+    HiveObjectRef hiveObject,
+    String principalName,
+    PrincipalType principalType,
+    PrivilegeGrantInfo grantInfo,
+    String authorizer)
+  {
+    this();
+    this.hiveObject = hiveObject;
+    this.principalName = principalName;
+    this.principalType = principalType;
+    this.grantInfo = grantInfo;
+    this.authorizer = authorizer;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public HiveObjectPrivilege(HiveObjectPrivilege other) {
+    if (other.isSetHiveObject()) {
+      this.hiveObject = new HiveObjectRef(other.hiveObject);
+    }
+    if (other.isSetPrincipalName()) {
+      this.principalName = other.principalName;
+    }
+    if (other.isSetPrincipalType()) {
+      this.principalType = other.principalType;
+    }
+    if (other.isSetGrantInfo()) {
+      this.grantInfo = new PrivilegeGrantInfo(other.grantInfo);
+    }
+    if (other.isSetAuthorizer()) {
+      this.authorizer = other.authorizer;
+    }
+  }
+
+  public HiveObjectPrivilege deepCopy() {
+    return new HiveObjectPrivilege(this);
+  }
+
+  @Override
+  public void clear() {
+    this.hiveObject = null;
+    this.principalName = null;
+    this.principalType = null;
+    this.grantInfo = null;
+    this.authorizer = null;
+  }
+
+  public HiveObjectRef getHiveObject() {
+    return this.hiveObject;
+  }
+
+  public void setHiveObject(HiveObjectRef hiveObject) {
+    this.hiveObject = hiveObject;
+  }
+
+  public void unsetHiveObject() {
+    this.hiveObject = null;
+  }
+
+  /** Returns true if field hiveObject is set (has been assigned a value) and false otherwise */
+  public boolean isSetHiveObject() {
+    return this.hiveObject != null;
+  }
+
+  public void setHiveObjectIsSet(boolean value) {
+    if (!value) {
+      this.hiveObject = null;
+    }
+  }
+
+  public String getPrincipalName() {
+    return this.principalName;
+  }
+
+  public void setPrincipalName(String principalName) {
+    this.principalName = principalName;
+  }
+
+  public void unsetPrincipalName() {
+    this.principalName = null;
+  }
+
+  /** Returns true if field principalName is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipalName() {
+    return this.principalName != null;
+  }
+
+  public void setPrincipalNameIsSet(boolean value) {
+    if (!value) {
+      this.principalName = null;
+    }
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public PrincipalType getPrincipalType() {
+    return this.principalType;
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public void setPrincipalType(PrincipalType principalType) {
+    this.principalType = principalType;
+  }
+
+  public void unsetPrincipalType() {
+    this.principalType = null;
+  }
+
+  /** Returns true if field principalType is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipalType() {
+    return this.principalType != null;
+  }
+
+  public void setPrincipalTypeIsSet(boolean value) {
+    if (!value) {
+      this.principalType = null;
+    }
+  }
+
+  public PrivilegeGrantInfo getGrantInfo() {
+    return this.grantInfo;
+  }
+
+  public void setGrantInfo(PrivilegeGrantInfo grantInfo) {
+    this.grantInfo = grantInfo;
+  }
+
+  public void unsetGrantInfo() {
+    this.grantInfo = null;
+  }
+
+  /** Returns true if field grantInfo is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantInfo() {
+    return this.grantInfo != null;
+  }
+
+  public void setGrantInfoIsSet(boolean value) {
+    if (!value) {
+      this.grantInfo = null;
+    }
+  }
+
+  public String getAuthorizer() {
+    return this.authorizer;
+  }
+
+  public void setAuthorizer(String authorizer) {
+    this.authorizer = authorizer;
+  }
+
+  public void unsetAuthorizer() {
+    this.authorizer = null;
+  }
+
+  /** Returns true if field authorizer is set (has been assigned a value) and false otherwise */
+  public boolean isSetAuthorizer() {
+    return this.authorizer != null;
+  }
+
+  public void setAuthorizerIsSet(boolean value) {
+    if (!value) {
+      this.authorizer = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case HIVE_OBJECT:
+      if (value == null) {
+        unsetHiveObject();
+      } else {
+        setHiveObject((HiveObjectRef)value);
+      }
+      break;
+
+    case PRINCIPAL_NAME:
+      if (value == null) {
+        unsetPrincipalName();
+      } else {
+        setPrincipalName((String)value);
+      }
+      break;
+
+    case PRINCIPAL_TYPE:
+      if (value == null) {
+        unsetPrincipalType();
+      } else {
+        setPrincipalType((PrincipalType)value);
+      }
+      break;
+
+    case GRANT_INFO:
+      if (value == null) {
+        unsetGrantInfo();
+      } else {
+        setGrantInfo((PrivilegeGrantInfo)value);
+      }
+      break;
+
+    case AUTHORIZER:
+      if (value == null) {
+        unsetAuthorizer();
+      } else {
+        setAuthorizer((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case HIVE_OBJECT:
+      return getHiveObject();
+
+    case PRINCIPAL_NAME:
+      return getPrincipalName();
+
+    case PRINCIPAL_TYPE:
+      return getPrincipalType();
+
+    case GRANT_INFO:
+      return getGrantInfo();
+
+    case AUTHORIZER:
+      return getAuthorizer();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case HIVE_OBJECT:
+      return isSetHiveObject();
+    case PRINCIPAL_NAME:
+      return isSetPrincipalName();
+    case PRINCIPAL_TYPE:
+      return isSetPrincipalType();
+    case GRANT_INFO:
+      return isSetGrantInfo();
+    case AUTHORIZER:
+      return isSetAuthorizer();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof HiveObjectPrivilege)
+      return this.equals((HiveObjectPrivilege)that);
+    return false;
+  }
+
+  public boolean equals(HiveObjectPrivilege that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_hiveObject = true && this.isSetHiveObject();
+    boolean that_present_hiveObject = true && that.isSetHiveObject();
+    if (this_present_hiveObject || that_present_hiveObject) {
+      if (!(this_present_hiveObject && that_present_hiveObject))
+        return false;
+      if (!this.hiveObject.equals(that.hiveObject))
+        return false;
+    }
+
+    boolean this_present_principalName = true && this.isSetPrincipalName();
+    boolean that_present_principalName = true && that.isSetPrincipalName();
+    if (this_present_principalName || that_present_principalName) {
+      if (!(this_present_principalName && that_present_principalName))
+        return false;
+      if (!this.principalName.equals(that.principalName))
+        return false;
+    }
+
+    boolean this_present_principalType = true && this.isSetPrincipalType();
+    boolean that_present_principalType = true && that.isSetPrincipalType();
+    if (this_present_principalType || that_present_principalType) {
+      if (!(this_present_principalType && that_present_principalType))
+        return false;
+      if (!this.principalType.equals(that.principalType))
+        return false;
+    }
+
+    boolean this_present_grantInfo = true && this.isSetGrantInfo();
+    boolean that_present_grantInfo = true && that.isSetGrantInfo();
+    if (this_present_grantInfo || that_present_grantInfo) {
+      if (!(this_present_grantInfo && that_present_grantInfo))
+        return false;
+      if (!this.grantInfo.equals(that.grantInfo))
+        return false;
+    }
+
+    boolean this_present_authorizer = true && this.isSetAuthorizer();
+    boolean that_present_authorizer = true && that.isSetAuthorizer();
+    if (this_present_authorizer || that_present_authorizer) {
+      if (!(this_present_authorizer && that_present_authorizer))
+        return false;
+      if (!this.authorizer.equals(that.authorizer))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_hiveObject = true && (isSetHiveObject());
+    list.add(present_hiveObject);
+    if (present_hiveObject)
+      list.add(hiveObject);
+
+    boolean present_principalName = true && (isSetPrincipalName());
+    list.add(present_principalName);
+    if (present_principalName)
+      list.add(principalName);
+
+    boolean present_principalType = true && (isSetPrincipalType());
+    list.add(present_principalType);
+    if (present_principalType)
+      list.add(principalType.getValue());
+
+    boolean present_grantInfo = true && (isSetGrantInfo());
+    list.add(present_grantInfo);
+    if (present_grantInfo)
+      list.add(grantInfo);
+
+    boolean present_authorizer = true && (isSetAuthorizer());
+    list.add(present_authorizer);
+    if (present_authorizer)
+      list.add(authorizer);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(HiveObjectPrivilege other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetHiveObject()).compareTo(other.isSetHiveObject());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHiveObject()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hiveObject, other.hiveObject);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrincipalName()).compareTo(other.isSetPrincipalName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipalName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principalName, other.principalName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrincipalType()).compareTo(other.isSetPrincipalType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipalType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principalType, other.principalType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantInfo()).compareTo(other.isSetGrantInfo());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantInfo()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantInfo, other.grantInfo);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAuthorizer()).compareTo(other.isSetAuthorizer());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAuthorizer()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.authorizer, other.authorizer);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("HiveObjectPrivilege(");
+    boolean first = true;
+
+    sb.append("hiveObject:");
+    if (this.hiveObject == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.hiveObject);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("principalName:");
+    if (this.principalName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principalName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("principalType:");
+    if (this.principalType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principalType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("grantInfo:");
+    if (this.grantInfo == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.grantInfo);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("authorizer:");
+    if (this.authorizer == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.authorizer);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (hiveObject != null) {
+      hiveObject.validate();
+    }
+    if (grantInfo != null) {
+      grantInfo.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class HiveObjectPrivilegeStandardSchemeFactory implements SchemeFactory {
+    public HiveObjectPrivilegeStandardScheme getScheme() {
+      return new HiveObjectPrivilegeStandardScheme();
+    }
+  }
+
+  private static class HiveObjectPrivilegeStandardScheme extends StandardScheme<HiveObjectPrivilege> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, HiveObjectPrivilege struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // HIVE_OBJECT
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.hiveObject = new HiveObjectRef();
+              struct.hiveObject.read(iprot);
+              struct.setHiveObjectIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PRINCIPAL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.principalName = iprot.readString();
+              struct.setPrincipalNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PRINCIPAL_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.principalType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+              struct.setPrincipalTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // GRANT_INFO
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.grantInfo = new PrivilegeGrantInfo();
+              struct.grantInfo.read(iprot);
+              struct.setGrantInfoIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // AUTHORIZER
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.authorizer = iprot.readString();
+              struct.setAuthorizerIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, HiveObjectPrivilege struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.hiveObject != null) {
+        oprot.writeFieldBegin(HIVE_OBJECT_FIELD_DESC);
+        struct.hiveObject.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.principalName != null) {
+        oprot.writeFieldBegin(PRINCIPAL_NAME_FIELD_DESC);
+        oprot.writeString(struct.principalName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.principalType != null) {
+        oprot.writeFieldBegin(PRINCIPAL_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.principalType.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.grantInfo != null) {
+        oprot.writeFieldBegin(GRANT_INFO_FIELD_DESC);
+        struct.grantInfo.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.authorizer != null) {
+        oprot.writeFieldBegin(AUTHORIZER_FIELD_DESC);
+        oprot.writeString(struct.authorizer);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class HiveObjectPrivilegeTupleSchemeFactory implements SchemeFactory {
+    public HiveObjectPrivilegeTupleScheme getScheme() {
+      return new HiveObjectPrivilegeTupleScheme();
+    }
+  }
+
+  private static class HiveObjectPrivilegeTupleScheme extends TupleScheme<HiveObjectPrivilege> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, HiveObjectPrivilege struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetHiveObject()) {
+        optionals.set(0);
+      }
+      if (struct.isSetPrincipalName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetPrincipalType()) {
+        optionals.set(2);
+      }
+      if (struct.isSetGrantInfo()) {
+        optionals.set(3);
+      }
+      if (struct.isSetAuthorizer()) {
+        optionals.set(4);
+      }
+      oprot.writeBitSet(optionals, 5);
+      if (struct.isSetHiveObject()) {
+        struct.hiveObject.write(oprot);
+      }
+      if (struct.isSetPrincipalName()) {
+        oprot.writeString(struct.principalName);
+      }
+      if (struct.isSetPrincipalType()) {
+        oprot.writeI32(struct.principalType.getValue());
+      }
+      if (struct.isSetGrantInfo()) {
+        struct.grantInfo.write(oprot);
+      }
+      if (struct.isSetAuthorizer()) {
+        oprot.writeString(struct.authorizer);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, HiveObjectPrivilege struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(5);
+      if (incoming.get(0)) {
+        struct.hiveObject = new HiveObjectRef();
+        struct.hiveObject.read(iprot);
+        struct.setHiveObjectIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.principalName = iprot.readString();
+        struct.setPrincipalNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.principalType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+        struct.setPrincipalTypeIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.grantInfo = new PrivilegeGrantInfo();
+        struct.grantInfo.read(iprot);
+        struct.setGrantInfoIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.authorizer = iprot.readString();
+        struct.setAuthorizerIsSet(true);
+      }
+    }
+  }
+
+}
+
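
A hedged sketch of assembling a HiveObjectPrivilege with the all-args constructor generated above. HiveObjectRef and PrivilegeGrantInfo are sibling generated structs that are not shown in this diff; the setter names used for them below follow the same generated-bean pattern and should be treated as assumptions. Note that validate() on the outer struct also validates the nested ref and grantInfo, as the generated method above shows.

    import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
    import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
    import org.apache.hadoop.hive.metastore.api.HiveObjectType;
    import org.apache.hadoop.hive.metastore.api.PrincipalType;
    import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;

    public class HiveObjectPrivilegeSketch {
      public static void main(String[] args) throws Exception {
        HiveObjectRef ref = new HiveObjectRef();              // assumed sibling struct
        ref.setObjectType(HiveObjectType.TABLE);
        ref.setDbName("default");
        ref.setObjectName("sales");

        PrivilegeGrantInfo grant = new PrivilegeGrantInfo();  // assumed sibling struct
        grant.setPrivilege("SELECT");
        grant.setGrantor("admin");
        grant.setGrantorType(PrincipalType.USER);
        grant.setGrantOption(false);

        HiveObjectPrivilege priv =
            new HiveObjectPrivilege(ref, "analyst", PrincipalType.USER, grant, "sql-std-auth");
        priv.validate();            // also validates the nested ref and grant structs
        System.out.println(priv);
      }
    }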


[67/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index 0000000,8ff056f..8539605
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@@ -1,0 -1,2532 +1,2530 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.cache;
+ 
+ 
+ import java.nio.ByteBuffer;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.EmptyStackException;
+ import java.util.HashMap;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Stack;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.ScheduledExecutorService;
+ import java.util.concurrent.ThreadFactory;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.regex.Matcher;
+ import java.util.regex.Pattern;
+ 
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.DatabaseName;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.Deadline;
+ import org.apache.hadoop.hive.metastore.FileMetadataHandler;
+ import org.apache.hadoop.hive.metastore.ObjectStore;
+ import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
+ import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
+ import org.apache.hadoop.hive.metastore.RawStore;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.Warehouse;
 -import org.apache.hadoop.hive.metastore.api.AggrStats;
 -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 -import org.apache.hadoop.hive.metastore.api.Database;
 -import org.apache.hadoop.hive.metastore.api.FieldSchema;
 -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 -import org.apache.hadoop.hive.metastore.api.Function;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 -import org.apache.hadoop.hive.metastore.api.ISchema;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 -import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 -import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 -import org.apache.hadoop.hive.metastore.api.MetaException;
 -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 -import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 -import org.apache.hadoop.hive.metastore.api.Partition;
 -import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 -import org.apache.hadoop.hive.metastore.api.PrincipalType;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 -import org.apache.hadoop.hive.metastore.api.WMNullablePool;
 -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
++import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.cache.SharedCache.StatsType;
+ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregator;
+ import org.apache.hadoop.hive.metastore.columnstats.aggr.ColumnStatsAggregatorFactory;
 -import org.apache.hadoop.hive.metastore.api.Role;
 -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 -import org.apache.hadoop.hive.metastore.api.RuntimeStat;
 -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableMeta;
 -import org.apache.hadoop.hive.metastore.api.Type;
 -import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 -import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMMapping;
 -import org.apache.hadoop.hive.metastore.api.WMPool;
 -import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.hadoop.hive.metastore.utils.StringUtils;
+ import org.apache.thrift.TException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+ 
+ // TODO filter->expr
+ // TODO functionCache
+ // TODO constraintCache
+ // TODO need sd nested copy?
+ // TODO String intern
+ // TODO monitor event queue
+ // TODO initial load slow?
+ // TODO size estimation
+ 
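+ // CachedStore wraps an underlying RawStore (ObjectStore by default): reads are served from the
+ // in-memory SharedCache once it is prewarmed, writes go to the raw store first and are then
+ // applied to the cache, and a background thread periodically refreshes the cached objects.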
+ public class CachedStore implements RawStore, Configurable {
+   private static ScheduledExecutorService cacheUpdateMaster = null;
+   private static List<Pattern> whitelistPatterns = null;
+   private static List<Pattern> blacklistPatterns = null;
+   // Default value set to 100 milliseconds for test purposes
+   private static long DEFAULT_CACHE_REFRESH_PERIOD = 100;
+   // Interval at which the metastore cache is refreshed from the metastore DB by the background update thread
+   private static long cacheRefreshPeriodMS = DEFAULT_CACHE_REFRESH_PERIOD;
+   private static AtomicBoolean isCachePrewarmed = new AtomicBoolean(false);
+   private static TablesPendingPrewarm tblsPendingPrewarm = new TablesPendingPrewarm();
+   private RawStore rawStore = null;
+   private Configuration conf;
+   private PartitionExpressionProxy expressionProxy = null;
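+   // A single static SharedCache is shared by every CachedStore instance in the process.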
+   private static final SharedCache sharedCache = new SharedCache();
+ 
+   static final private Logger LOG = LoggerFactory.getLogger(CachedStore.class.getName());
+ 
+   @Override
+   public void setConf(Configuration conf) {
+     setConfInternal(conf);
+     initBlackListWhiteList(conf);
+     initSharedCache(conf);
+     startCacheUpdateService(conf, false, true);
+   }
+ 
+   /**
+    * Similar to setConf, but used from within tests. Unlike setConf, this does not start the
+    * background thread for prewarm and update.
+    * @param conf the metastore configuration
+    */
+   void setConfForTest(Configuration conf) {
+     setConfInternal(conf);
+     initBlackListWhiteList(conf);
+     initSharedCache(conf);
+   }
+ 
+   private void setConfInternal(Configuration conf) {
+     String rawStoreClassName =
+         MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName());
+     if (rawStore == null) {
+       try {
+         rawStore = (JavaUtils.getClass(rawStoreClassName, RawStore.class)).newInstance();
+       } catch (Exception e) {
+         throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+       }
+     }
+     rawStore.setConf(conf);
+     Configuration oldConf = this.conf;
+     this.conf = conf;
+     if (expressionProxy != null && conf != oldConf) {
+       LOG.warn("Unexpected setConf when we were already configured");
+     } else {
+       expressionProxy = PartFilterExprUtil.createExpressionProxy(conf);
+     }
+   }
+ 
+   private void initSharedCache(Configuration conf) {
+     long maxSharedCacheSizeInBytes =
+         MetastoreConf.getSizeVar(conf, ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY);
+     sharedCache.initialize(maxSharedCacheSizeInBytes);
+     if (maxSharedCacheSizeInBytes > 0) {
+       LOG.info("Maximum memory that the cache will use: {} GB",
+           maxSharedCacheSizeInBytes / (1024 * 1024 * 1024));
+     }
+   }
+ 
+   /**
+    * Initializes the caches in SharedCache by getting the objects from the metastore DB via
+    * ObjectStore and populating the respective caches.
+    */
+   @VisibleForTesting
+   static void prewarm(RawStore rawStore) {
+     if (isCachePrewarmed.get()) {
+       return;
+     }
+     long startTime = System.nanoTime();
+     LOG.info("Prewarming CachedStore");
+     while (!isCachePrewarmed.get()) {
+       // Prevents throwing exceptions in our raw store calls since we're not using RawStoreProxy
+       Deadline.registerIfNot(1000000);
+       Collection<String> catalogsToCache;
+       try {
+         catalogsToCache = catalogsToCache(rawStore);
+         LOG.info("Going to cache catalogs: "
+             + org.apache.commons.lang.StringUtils.join(catalogsToCache, ", "));
+         List<Catalog> catalogs = new ArrayList<>(catalogsToCache.size());
+         for (String catName : catalogsToCache) {
+           catalogs.add(rawStore.getCatalog(catName));
+         }
+         sharedCache.populateCatalogsInCache(catalogs);
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.warn("Failed to populate catalogs in cache, going to try again", e);
+         // try again
+         continue;
+       }
+       LOG.info("Finished prewarming catalogs, starting on databases");
+       List<Database> databases = new ArrayList<>();
+       for (String catName : catalogsToCache) {
+         try {
+           List<String> dbNames = rawStore.getAllDatabases(catName);
+           LOG.info("Number of databases to prewarm in catalog {}: {}", catName, dbNames.size());
+           for (String dbName : dbNames) {
+             try {
+               databases.add(rawStore.getDatabase(catName, dbName));
+             } catch (NoSuchObjectException e) {
+               // Continue with next database
+               LOG.warn("Failed to cache database "
+                   + DatabaseName.getQualified(catName, dbName) + ", moving on", e);
+             }
+           }
+         } catch (MetaException e) {
+           LOG.warn("Failed to cache databases in catalog " + catName + ", moving on", e);
+         }
+       }
+       sharedCache.populateDatabasesInCache(databases);
+       LOG.info(
+           "Databases cache is now prewarmed. Now adding tables, partitions and statistics to the cache");
+       int numberOfDatabasesCachedSoFar = 0;
+       for (Database db : databases) {
+         String catName = StringUtils.normalizeIdentifier(db.getCatalogName());
+         String dbName = StringUtils.normalizeIdentifier(db.getName());
+         List<String> tblNames;
+         try {
+           tblNames = rawStore.getAllTables(catName, dbName);
+         } catch (MetaException e) {
+           LOG.warn("Failed to cache tables for database "
+               + DatabaseName.getQualified(catName, dbName) + ", moving on");
+           // Continue with next database
+           continue;
+         }
+         tblsPendingPrewarm.addTableNamesForPrewarming(tblNames);
+         int totalTablesToCache = tblNames.size();
+         int numberOfTablesCachedSoFar = 0;
+         while (tblsPendingPrewarm.hasMoreTablesToPrewarm()) {
+           try {
+             String tblName =
+                 StringUtils.normalizeIdentifier(tblsPendingPrewarm.getNextTableNameToPrewarm());
+             if (!shouldCacheTable(catName, dbName, tblName)) {
+               continue;
+             }
+             Table table;
+             try {
+               table = rawStore.getTable(catName, dbName, tblName);
+             } catch (MetaException e) {
+               // The table may have been dropped while we were fetching the database's tables;
+               // in that case, continue with the next table
+               continue;
+             }
+             List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+             try {
+               ColumnStatistics tableColStats = null;
+               List<Partition> partitions = null;
+               List<ColumnStatistics> partitionColStats = null;
+               AggrStats aggrStatsAllPartitions = null;
+               AggrStats aggrStatsAllButDefaultPartition = null;
+               if (table.isSetPartitionKeys()) {
+                 Deadline.startTimer("getPartitions");
+                 partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);
+                 Deadline.stopTimer();
+                 List<String> partNames = new ArrayList<>(partitions.size());
+                 for (Partition p : partitions) {
+                   partNames.add(Warehouse.makePartName(table.getPartitionKeys(), p.getValues()));
+                 }
+                 if (!partNames.isEmpty()) {
+                   // Get partition column stats for this table
+                   Deadline.startTimer("getPartitionColumnStatistics");
+                   partitionColStats = rawStore.getPartitionColumnStatistics(catName, dbName,
+                       tblName, partNames, colNames);
+                   Deadline.stopTimer();
+                   // Get aggregate stats for all partitions of a table and for all but default
+                   // partition
+                   Deadline.startTimer("getAggrPartitionColumnStatistics");
+                   aggrStatsAllPartitions =
+                       rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+                   Deadline.stopTimer();
+                   // Remove default partition from partition names and get aggregate
+                   // stats again
+                   List<FieldSchema> partKeys = table.getPartitionKeys();
+                   String defaultPartitionValue =
+                       MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME);
+                   List<String> partCols = new ArrayList<>();
+                   List<String> partVals = new ArrayList<>();
+                   for (FieldSchema fs : partKeys) {
+                     partCols.add(fs.getName());
+                     partVals.add(defaultPartitionValue);
+                   }
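+                   // With the stock configuration the default partition value is
+                   // "__HIVE_DEFAULT_PARTITION__", so this yields e.g.
+                   // "key1=__HIVE_DEFAULT_PARTITION__/key2=__HIVE_DEFAULT_PARTITION__".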
+                   String defaultPartitionName = FileUtils.makePartName(partCols, partVals);
+                   partNames.remove(defaultPartitionName);
+                   Deadline.startTimer("getAggrPartitionColumnStatistics");
+                   aggrStatsAllButDefaultPartition =
+                       rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+                   Deadline.stopTimer();
+                 }
+               } else {
+                 Deadline.startTimer("getTableColumnStatistics");
+                 tableColStats =
+                     rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+                 Deadline.stopTimer();
+               }
++              // TODO## should this take write ID into account? or at least cache write ID to verify?
+               // If the table could not be cached due to the memory limit, stop the prewarm
+               boolean isSuccess = sharedCache.populateTableInCache(table, tableColStats, partitions,
+                   partitionColStats, aggrStatsAllPartitions, aggrStatsAllButDefaultPartition);
+               if (isSuccess) {
+                 LOG.trace("Cached Database: {}'s Table: {}.", dbName, tblName);
+               } else {
+                 LOG.info(
+                     "Unable to cache Database: {}'s Table: {}, since the cache memory is full. "
+                         + "Will stop attempting to cache any more tables.",
+                     dbName, tblName);
+                 completePrewarm(startTime);
+                 return;
+               }
+             } catch (MetaException | NoSuchObjectException e) {
+               // Continue with next table
+               continue;
+             }
+             LOG.debug("Processed database: {}'s table: {}. Cached {} / {}  tables so far.", dbName,
+                 tblName, ++numberOfTablesCachedSoFar, totalTablesToCache);
+           } catch (EmptyStackException e) {
+             // We've prewarmed this database, continue with the next one
+             continue;
+           }
+         }
+         LOG.debug("Processed database: {}. Cached {} / {} databases so far.", dbName,
+             ++numberOfDatabasesCachedSoFar, databases.size());
+       }
+       completePrewarm(startTime);
+     }
+   }
+ 
+   private static void completePrewarm(long startTime) {
+     isCachePrewarmed.set(true);
+     LOG.info("CachedStore initialized");
+     long endTime = System.nanoTime();
+     LOG.info("Time taken in prewarming = " + (endTime - startTime) / 1000000 + "ms");
+     sharedCache.completeTableCachePrewarm();
+   }
+ 
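+   // Tracks tables still awaiting prewarm. Backed by a Stack so that a table requested while
+   // prewarm is running can be pushed to the top (see prioritizeTableForPrewarm) and loaded next.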
+   static class TablesPendingPrewarm {
+     private Stack<String> tableNames = new Stack<>();
+ 
+     private synchronized void addTableNamesForPrewarming(List<String> tblNames) {
+       tableNames.clear();
+       if (tblNames != null) {
+         tableNames.addAll(tblNames);
+       }
+     }
+ 
+     private synchronized boolean hasMoreTablesToPrewarm() {
+       return !tableNames.empty();
+     }
+ 
+     private synchronized String getNextTableNameToPrewarm() {
+       return tableNames.pop();
+     }
+ 
+     private synchronized void prioritizeTableForPrewarm(String tblName) {
+       // If the table is in the pending prewarm list, move it to the top
+       if (tableNames.remove(tblName)) {
+         tableNames.push(tblName);
+       }
+     }
+   }
+ 
+   @VisibleForTesting
+   static void setCachePrewarmedState(boolean state) {
+     isCachePrewarmed.set(state);
+   }
+ 
+   private static void initBlackListWhiteList(Configuration conf) {
+     if (whitelistPatterns == null || blacklistPatterns == null) {
+       whitelistPatterns = createPatterns(MetastoreConf.getAsString(conf,
+           MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_WHITELIST));
+       blacklistPatterns = createPatterns(MetastoreConf.getAsString(conf,
+           MetastoreConf.ConfVars.CACHED_RAW_STORE_CACHED_OBJECTS_BLACKLIST));
+     }
+   }
+ 
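+   // Returns the catalogs configured for caching, or all catalogs in the raw store when the
+   // config value is unset or empty.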
+   private static Collection<String> catalogsToCache(RawStore rs) throws MetaException {
+     Collection<String> confValue =
+         MetastoreConf.getStringCollection(rs.getConf(), ConfVars.CATALOGS_TO_CACHE);
+     if (confValue == null || confValue.isEmpty() ||
+         (confValue.size() == 1 && confValue.contains(""))) {
+       return rs.getCatalogs();
+     } else {
+       return confValue;
+     }
+   }
+ 
+   /**
+    * Starts a background thread which initially populates the SharedCache and later
+    * periodically gets updates from the metastore DB.
+    *
+    * @param conf the metastore configuration
+    * @param runOnlyOnce whether to schedule a single update instead of a recurring one (used by some tests)
+    * @param shouldRunPrewarm whether the scheduled work should run the prewarm phase
+    */
+   @VisibleForTesting
+   static synchronized void startCacheUpdateService(Configuration conf, boolean runOnlyOnce,
+       boolean shouldRunPrewarm) {
+     if (cacheUpdateMaster == null) {
+       initBlackListWhiteList(conf);
+       if (!MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST)) {
+         cacheRefreshPeriodMS = MetastoreConf.getTimeVar(conf,
+             ConfVars.CACHED_RAW_STORE_CACHE_UPDATE_FREQUENCY, TimeUnit.MILLISECONDS);
+       }
+       LOG.info("CachedStore: starting cache update service (run every {} ms", cacheRefreshPeriodMS);
+       cacheUpdateMaster = Executors.newScheduledThreadPool(1, new ThreadFactory() {
+         @Override
+         public Thread newThread(Runnable r) {
+           Thread t = Executors.defaultThreadFactory().newThread(r);
+           t.setName("CachedStore-CacheUpdateService: Thread-" + t.getId());
+           t.setDaemon(true);
+           return t;
+         }
+       });
+       if (!runOnlyOnce) {
+         cacheUpdateMaster.scheduleAtFixedRate(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0,
+             cacheRefreshPeriodMS, TimeUnit.MILLISECONDS);
+       }
+     }
+     if (runOnlyOnce) {
+       // Some tests control the execution of the background update thread
+       cacheUpdateMaster.schedule(new CacheUpdateMasterWork(conf, shouldRunPrewarm), 0,
+           TimeUnit.MILLISECONDS);
+     }
+   }
+ 
+   @VisibleForTesting
+   static synchronized boolean stopCacheUpdateService(long timeout) {
+     boolean tasksStoppedBeforeShutdown = false;
+     if (cacheUpdateMaster != null) {
+       LOG.info("CachedStore: shutting down cache update service");
+       try {
+         tasksStoppedBeforeShutdown =
+             cacheUpdateMaster.awaitTermination(timeout, TimeUnit.MILLISECONDS);
+       } catch (InterruptedException e) {
+         LOG.info("CachedStore: cache update service was interrupted while waiting for tasks to "
+             + "complete before shutting down. Will make a hard stop now.");
+       }
+       cacheUpdateMaster.shutdownNow();
+       cacheUpdateMaster = null;
+     }
+     return tasksStoppedBeforeShutdown;
+   }
+ 
+   @VisibleForTesting
+   static void setCacheRefreshPeriod(long time) {
+     cacheRefreshPeriodMS = time;
+   }
+ 
+   static class CacheUpdateMasterWork implements Runnable {
+     private boolean shouldRunPrewarm = true;
+     private final RawStore rawStore;
+ 
+     CacheUpdateMasterWork(Configuration conf, boolean shouldRunPrewarm) {
+       this.shouldRunPrewarm = shouldRunPrewarm;
+       String rawStoreClassName =
+           MetastoreConf.getVar(conf, ConfVars.CACHED_RAW_STORE_IMPL, ObjectStore.class.getName());
+       try {
+         rawStore = JavaUtils.getClass(rawStoreClassName, RawStore.class).newInstance();
+         rawStore.setConf(conf);
+       } catch (InstantiationException | IllegalAccessException | MetaException e) {
+         // MetaException here really means ClassNotFound (see the utility method).
+         // So, if any of these happen, that means we can never succeed.
+         throw new RuntimeException("Cannot instantiate " + rawStoreClassName, e);
+       }
+     }
+ 
+     @Override
+     public void run() {
+       if (!shouldRunPrewarm) {
+         // TODO: prewarm and update can probably be merged.
+         update();
+       } else {
+         try {
+           prewarm(rawStore);
+         } catch (Exception e) {
+           LOG.error("Prewarm failure", e);
+           return;
+         }
+       }
+     }
+ 
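+     // A single refresh pass: re-reads every cached catalog, database, table, partition and
+     // statistics object from the raw store and swaps the fresh copies into the SharedCache.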
+     void update() {
+       Deadline.registerIfNot(1000000);
+       LOG.debug("CachedStore: updating cached objects");
+       try {
+         for (String catName : catalogsToCache(rawStore)) {
+           List<String> dbNames = rawStore.getAllDatabases(catName);
+           // Update the database in cache
+           updateDatabases(rawStore, catName, dbNames);
+           for (String dbName : dbNames) {
+             // Update the tables in cache
+             updateTables(rawStore, catName, dbName);
+             List<String> tblNames;
+             try {
+               tblNames = rawStore.getAllTables(catName, dbName);
+             } catch (MetaException e) {
+               // Continue with next database
+               continue;
+             }
+             for (String tblName : tblNames) {
+               if (!shouldCacheTable(catName, dbName, tblName)) {
+                 continue;
+               }
+               // Update the table column stats for a table in cache
+               updateTableColStats(rawStore, catName, dbName, tblName);
+               // Update the partitions for a table in cache
+               updateTablePartitions(rawStore, catName, dbName, tblName);
+               // Update the partition col stats for a table in cache
+               updateTablePartitionColStats(rawStore, catName, dbName, tblName);
+               // Update aggregate partition column stats for a table in cache
+               updateTableAggregatePartitionColStats(rawStore, catName, dbName, tblName);
+             }
+           }
+         }
+         sharedCache.incrementUpdateCount();
+       } catch (MetaException e) {
+         LOG.error("Updating CachedStore: an error occurred during refresh; skipping this iteration", e);
+       }
+     }
+ 
+     private void updateDatabases(RawStore rawStore, String catName, List<String> dbNames) {
+       // Prepare the list of databases
+       List<Database> databases = new ArrayList<>();
+       for (String dbName : dbNames) {
+         Database db;
+         try {
+           db = rawStore.getDatabase(catName, dbName);
+           databases.add(db);
+         } catch (NoSuchObjectException e) {
+           LOG.info("Updating CachedStore: database - " + catName + "." + dbName
+               + " does not exist.", e);
+         }
+       }
+       sharedCache.refreshDatabasesInCache(databases);
+     }
+ 
+     private void updateTables(RawStore rawStore, String catName, String dbName) {
+       List<Table> tables = new ArrayList<>();
+       try {
+         List<String> tblNames = rawStore.getAllTables(catName, dbName);
+         for (String tblName : tblNames) {
+           if (!shouldCacheTable(catName, dbName, tblName)) {
+             continue;
+           }
+           Table table = rawStore.getTable(StringUtils.normalizeIdentifier(catName),
+               StringUtils.normalizeIdentifier(dbName),
+               StringUtils.normalizeIdentifier(tblName));
+           tables.add(table);
+         }
+         sharedCache.refreshTablesInCache(catName, dbName, tables);
+       } catch (MetaException e) {
+         LOG.debug("Unable to refresh cached tables for database: " + dbName, e);
+       }
+     }
+ 
+     private void updateTableColStats(RawStore rawStore, String catName, String dbName, String tblName) {
+       try {
+         Table table = rawStore.getTable(catName, dbName, tblName);
+         if (!table.isSetPartitionKeys()) {
+           List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+           Deadline.startTimer("getTableColumnStatistics");
+           ColumnStatistics tableColStats =
+               rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
+           Deadline.stopTimer();
+           if (tableColStats != null) {
++            // TODO## should this take write ID into account? or at least cache write ID to verify?
+             sharedCache.refreshTableColStatsInCache(StringUtils.normalizeIdentifier(catName),
+                 StringUtils.normalizeIdentifier(dbName),
+                 StringUtils.normalizeIdentifier(tblName), tableColStats.getStatsObj());
+           }
+         }
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Unable to refresh table column stats for table: " + tblName, e);
+       }
+     }
+ 
+     private void updateTablePartitions(RawStore rawStore, String catName, String dbName, String tblName) {
+       try {
+         Deadline.startTimer("getPartitions");
+         List<Partition> partitions = rawStore.getPartitions(catName, dbName, tblName, Integer.MAX_VALUE);
+         Deadline.stopTimer();
+         sharedCache.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName),
+             StringUtils.normalizeIdentifier(dbName),
+             StringUtils.normalizeIdentifier(tblName), partitions);
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
+       }
+     }
+ 
+     private void updateTablePartitionColStats(RawStore rawStore, String catName, String dbName, String tblName) {
+       try {
+         Table table = rawStore.getTable(catName, dbName, tblName);
+         List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+         List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+         // Get partition column stats for this table
+         Deadline.startTimer("getPartitionColumnStatistics");
++        // TODO## should this take write ID into account? or at least cache write ID to verify?
+         List<ColumnStatistics> partitionColStats =
+             rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
+         Deadline.stopTimer();
+         sharedCache.refreshPartitionColStatsInCache(catName, dbName, tblName, partitionColStats);
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Updating CachedStore: unable to read partitions of table: " + tblName, e);
+       }
+     }
+ 
+     // Update cached aggregate stats for all partitions of a table and for all
+     // but default partition
+     private void updateTableAggregatePartitionColStats(RawStore rawStore, String catName, String dbName,
+                                                        String tblName) {
+       try {
+         Table table = rawStore.getTable(catName, dbName, tblName);
+         List<String> partNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+         List<String> colNames = MetaStoreUtils.getColumnNamesForTable(table);
+         if (partNames != null && !partNames.isEmpty()) {
+           Deadline.startTimer("getAggregateStatsForAllPartitions");
+           AggrStats aggrStatsAllPartitions =
+               rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+           Deadline.stopTimer();
+           // Remove default partition from partition names and get aggregate stats again
+           List<FieldSchema> partKeys = table.getPartitionKeys();
+           String defaultPartitionValue =
+               MetastoreConf.getVar(rawStore.getConf(), ConfVars.DEFAULTPARTITIONNAME);
+           List<String> partCols = new ArrayList<>();
+           List<String> partVals = new ArrayList<>();
+           for (FieldSchema fs : partKeys) {
+             partCols.add(fs.getName());
+             partVals.add(defaultPartitionValue);
+           }
+           String defaultPartitionName = FileUtils.makePartName(partCols, partVals);
+           partNames.remove(defaultPartitionName);
+           Deadline.startTimer("getAggregareStatsForAllPartitionsExceptDefault");
+           AggrStats aggrStatsAllButDefaultPartition =
+               rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
+           Deadline.stopTimer();
+           sharedCache.refreshAggregateStatsInCache(StringUtils.normalizeIdentifier(catName),
+               StringUtils.normalizeIdentifier(dbName),
+               StringUtils.normalizeIdentifier(tblName), aggrStatsAllPartitions,
+               aggrStatsAllButDefaultPartition);
+         }
+       } catch (MetaException | NoSuchObjectException e) {
+         LOG.info("Updating CachedStore: unable to read aggregate column stats of table: " + tblName,
+             e);
+       }
+     }
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return rawStore.getConf();
+   }
+ 
+   @Override
+   public void shutdown() {
+     rawStore.shutdown();
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+     return rawStore.openTransaction();
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     return rawStore.commitTransaction();
+   }
+ 
+   @Override
+   public boolean isActiveTransaction() {
+     return rawStore.isActiveTransaction();
+   }
+ 
+   @Override
+   public void rollbackTransaction() {
+     rawStore.rollbackTransaction();
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+     rawStore.createCatalog(cat);
+     sharedCache.addCatalogToCache(cat);
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat) throws MetaException,
+       InvalidOperationException {
+     rawStore.alterCatalog(catName, cat);
+     sharedCache.alterCatalogInCache(StringUtils.normalizeIdentifier(catName), cat);
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     if (!sharedCache.isCatalogCachePrewarmed()) {
+       return rawStore.getCatalog(catalogName);
+     }
+     Catalog cat = sharedCache.getCatalogFromCache(normalizeIdentifier(catalogName));
+     if (cat == null) {
+       throw new NoSuchObjectException();
+     }
+     return cat;
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     if (!sharedCache.isCatalogCachePrewarmed()) {
+       return rawStore.getCatalogs();
+     }
+     return sharedCache.listCachedCatalogs();
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     rawStore.dropCatalog(catalogName);
+     catalogName = catalogName.toLowerCase();
+     sharedCache.removeCatalogFromCache(catalogName);
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+     rawStore.createDatabase(db);
+     sharedCache.addDatabaseToCache(db);
+   }
+ 
+   @Override
+   public Database getDatabase(String catName, String dbName) throws NoSuchObjectException {
+     if (!sharedCache.isDatabaseCachePrewarmed()) {
+       return rawStore.getDatabase(catName, dbName);
+     }
+     dbName = dbName.toLowerCase();
+     Database db = sharedCache.getDatabaseFromCache(StringUtils.normalizeIdentifier(catName),
+             StringUtils.normalizeIdentifier(dbName));
+     if (db == null) {
+       throw new NoSuchObjectException();
+     }
+     return db;
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbName) throws NoSuchObjectException, MetaException {
+     boolean succ = rawStore.dropDatabase(catName, dbName);
+     if (succ) {
+       sharedCache.removeDatabaseFromCache(StringUtils.normalizeIdentifier(catName),
+           StringUtils.normalizeIdentifier(dbName));
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean alterDatabase(String catName, String dbName, Database db)
+       throws NoSuchObjectException, MetaException {
+     boolean succ = rawStore.alterDatabase(catName, dbName, db);
+     if (succ) {
+       sharedCache.alterDatabaseInCache(StringUtils.normalizeIdentifier(catName),
+           StringUtils.normalizeIdentifier(dbName), db);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+     if (!sharedCache.isDatabaseCachePrewarmed()) {
+       return rawStore.getDatabases(catName, pattern);
+     }
+     return sharedCache.listCachedDatabases(catName, pattern);
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+     if (!sharedCache.isDatabaseCachePrewarmed()) {
+       return rawStore.getAllDatabases(catName);
+     }
+     return sharedCache.listCachedDatabases(catName);
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     return rawStore.createType(type);
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     return rawStore.getType(typeName);
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     return rawStore.dropType(typeName);
+   }
+ 
+   private void validateTableType(Table tbl) {
+     // If the table has property EXTERNAL set, update table type
+     // accordingly
+     String tableType = tbl.getTableType();
+     boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL"));
+     if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
+       if (isExternal) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       }
+     }
+     if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
+       if (!isExternal) {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+     tbl.setTableType(tableType);
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     rawStore.createTable(tbl);
+     String catName = normalizeIdentifier(tbl.getCatName());
+     String dbName = normalizeIdentifier(tbl.getDbName());
+     String tblName = normalizeIdentifier(tbl.getTableName());
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
+     validateTableType(tbl);
+     sharedCache.addTableToCache(catName, dbName, tblName, tbl);
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tblName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.dropTable(catName, dbName, tblName);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removeTableFromCache(catName, dbName, tblName);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public Table getTable(String catName, String dbName, String tblName) throws MetaException {
++    return getTable(catName, dbName, tblName, -1, null);
++  }
++
++  // TODO: if writeIdList is not null, check isolation level compliance for SVS,
++  // possibly with getTableFromCache() with table snapshot in cache.
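++  // Presumably writeIdList is a serialized ValidWriteIdList for the caller's transaction; a
++  // non-null value requests a snapshot-consistent read, which the cache cannot yet provide,
++  // so those calls fall through to the raw store below.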
++  @Override
++  public Table getTable(String catName, String dbName, String tblName,
++                        long txnId, String writeIdList)
++      throws MetaException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      return rawStore.getTable(catName, dbName, tblName);
++      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
 -    if (tbl == null) {
++    if (tbl == null || writeIdList != null) {
+       // Either the table is not yet loaded in cache, or the caller passed a write ID list
+       // that the cache cannot validate, so fall back to the raw store.
+       // If the prewarm thread is working on this table's database,
+       // let's move this table to the top of the tblsPendingPrewarm stack,
+       // so that it gets loaded to the cache faster and is available for subsequent requests
+       tblsPendingPrewarm.prioritizeTableForPrewarm(tblName);
 -      return rawStore.getTable(catName, dbName, tblName);
++      return rawStore.getTable(catName, dbName, tblName, txnId, writeIdList);
+     }
+     // tbl is non-null here; strip privileges and re-set rewriteEnabled so the optional
+     // Thrift field is marked as set before returning the cached copy.
+     tbl.unsetPrivileges();
+     tbl.setRewriteEnabled(tbl.isRewriteEnabled());
+     return tbl;
+   }
+ 
+   @Override
+   public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartition(part);
+     if (succ) {
+       String dbName = normalizeIdentifier(part.getDbName());
+       String tblName = normalizeIdentifier(part.getTableName());
+       String catName = part.isSetCatName() ? normalizeIdentifier(part.getCatName()) : DEFAULT_CATALOG_NAME;
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.addPartitionToCache(catName, dbName, tblName, part);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartitions(catName, dbName, tblName, parts);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.addPartitionsToCache(catName, dbName, tblName, parts);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec,
+       boolean ifNotExists) throws InvalidObjectException, MetaException {
+     boolean succ = rawStore.addPartitions(catName, dbName, tblName, partitionSpec, ifNotExists);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       PartitionSpecProxy.PartitionIterator iterator = partitionSpec.getPartitionIterator();
+       while (iterator.hasNext()) {
+         Partition part = iterator.next();
+         sharedCache.addPartitionToCache(catName, dbName, tblName, part);
+       }
+     }
+     return succ;
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tblName, List<String> part_vals)
+       throws MetaException, NoSuchObjectException {
++    return getPartition(catName, dbName, tblName, part_vals, -1, null);
++  }
++
++  // TODO: the same as getTable()
++  @Override
++  public Partition getPartition(String catName, String dbName, String tblName,
++                                List<String> part_vals, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      return rawStore.getPartition(catName, dbName, tblName, part_vals);
++      return rawStore.getPartition(
++          catName, dbName, tblName, part_vals, txnId, writeIdList);
+     }
+     Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, part_vals);
 -    if (part == null) {
++    if (part == null || writeIdList != null) {
+       // Either the containing table is not yet loaded in cache, or the caller passed a
+       // write ID list that the cache cannot validate; fall back to the raw store.
 -      return rawStore.getPartition(catName, dbName, tblName, part_vals);
++      return rawStore.getPartition(
++          catName, dbName, tblName, part_vals, txnId, writeIdList);
+     }
+     return part;
+   }
+ 
+   @Override
+   public boolean doesPartitionExist(String catName, String dbName, String tblName,
+       List<FieldSchema> partKeys, List<String> part_vals)
+       throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table containing the partition is not yet loaded in cache
+       return rawStore.doesPartitionExist(catName, dbName, tblName, partKeys, part_vals);
+     }
+     return sharedCache.existPartitionFromCache(catName, dbName, tblName, part_vals);
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String dbName, String tblName, List<String> part_vals)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.dropPartition(catName, dbName, tblName, part_vals);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removePartitionFromCache(catName, dbName, tblName, part_vals);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     rawStore.dropPartitions(catName, dbName, tblName, partNames);
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
+     List<List<String>> partVals = new ArrayList<>();
+     for (String partName : partNames) {
+       partVals.add(partNameToVals(partName));
+     }
+     sharedCache.removePartitionsFromCache(catName, dbName, tblName, partVals);
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String catName, String dbName, String tblName, int max)
+       throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitions(catName, dbName, tblName, max);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table containing the partitions is not yet loaded in cache
+       return rawStore.getPartitions(catName, dbName, tblName, max);
+     }
+     List<Partition> parts = sharedCache.listCachedPartitions(catName, dbName, tblName, max);
+     return parts;
+   }
+ 
+   @Override
+   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max) {
+     return rawStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max);
+   }
+ 
+   @Override
 -  public void alterTable(String catName, String dbName, String tblName, Table newTable)
 -      throws InvalidObjectException, MetaException {
 -    rawStore.alterTable(catName, dbName, tblName, newTable);
++  public void alterTable(String catName, String dbName, String tblName, Table newTable,
++      long txnId, String validWriteIds) throws InvalidObjectException, MetaException {
++    rawStore.alterTable(catName, dbName, tblName, newTable, txnId, validWriteIds);
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tblName = normalizeIdentifier(tblName);
+     String newTblName = normalizeIdentifier(newTable.getTableName());
+     if (!shouldCacheTable(catName, dbName, tblName) &&
+         !shouldCacheTable(catName, dbName, newTblName)) {
+       return;
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table is not yet loaded in cache
+       return;
+     }
+     if (shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
+       // If old table is in the cache and the new table can also be cached
+       sharedCache.alterTableInCache(catName, dbName, tblName, newTable);
+     } else if (!shouldCacheTable(catName, dbName, tblName) && shouldCacheTable(catName, dbName, newTblName)) {
+       // If old table is *not* in the cache but the new table can be cached
+       sharedCache.addTableToCache(catName, dbName, newTblName, newTable);
+     } else if (shouldCacheTable(catName, dbName, tblName) && !shouldCacheTable(catName, dbName, newTblName)) {
+       // If old table is in the cache but the new table *cannot* be cached
+       sharedCache.removeTableFromCache(catName, dbName, tblName);
+     }
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException {
+     rawStore.updateCreationMetadata(catName, dbname, tablename, cm);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getTables(catName, dbName, pattern);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName), pattern, (short) -1);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
+       throws MetaException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getTables(catName, dbName, pattern, tableType);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName), pattern, tableType);
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getMaterializedViewsForRewriting(catName, dbName);
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                       List<String> tableTypes) throws MetaException {
+     // TODO Check if all required tables are allowed, if so, get it from cache
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getTableMeta(catName, dbNames, tableNames, tableTypes);
+     }
+     return sharedCache.getTableMeta(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbNames),
+         StringUtils.normalizeIdentifier(tableNames), tableTypes);
+   }
+ 
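+   // Serves from the cache only when prewarm has finished and every requested table is cacheable;
+   // tables missing from the cache are still fetched individually from the raw store.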
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tblNames)
+       throws MetaException, UnknownDBException {
+     dbName = normalizeIdentifier(dbName);
+     catName = normalizeIdentifier(catName);
+     boolean missSomeInCache = false;
+     for (String tblName : tblNames) {
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         missSomeInCache = true;
+         break;
+       }
+     }
+     if (!isCachePrewarmed.get() || missSomeInCache) {
+       return rawStore.getTableObjectsByName(catName, dbName, tblNames);
+     }
+     List<Table> tables = new ArrayList<>();
+     for (String tblName : tblNames) {
+       tblName = normalizeIdentifier(tblName);
+       Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+       if (tbl == null) {
+         tbl = rawStore.getTable(catName, dbName, tblName);
+       }
+       tables.add(tbl);
+     }
+     return tables;
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.getAllTables(catName, dbName);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName));
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+                                              short max_tables)
+       throws MetaException, UnknownDBException {
+     if (!isBlacklistWhitelistEmpty(conf) || !isCachePrewarmed.get()) {
+       return rawStore.listTableNamesByFilter(catName, dbName, filter, max_tables);
+     }
+     return sharedCache.listCachedTableNames(StringUtils.normalizeIdentifier(catName),
+         StringUtils.normalizeIdentifier(dbName), filter, max_tables);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tblName,
+       short max_parts) throws MetaException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.listPartitionNames(catName, dbName, tblName, max_parts);
+     }
+     Table tbl = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (tbl == null) {
+       // The table is not yet loaded in cache
+       return rawStore.listPartitionNames(catName, dbName, tblName, max_parts);
+     }
+     List<String> partitionNames = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, max_parts)) {
+       if (max_parts == -1 || count < max_parts) {
+         partitionNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues()));
+         count++;
+       }
+     }
+     return partitionNames;
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name,
+       List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
+       List<FieldSchema> order, long maxParts) throws MetaException {
+     throw new UnsupportedOperationException();
+   }
+ 
+   @Override
+   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
 -                             Partition newPart) throws InvalidObjectException, MetaException {
 -    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart);
++                             Partition newPart, long queryTxnId, String queryValidWriteIds)
++                                 throws InvalidObjectException, MetaException {
++    rawStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tblName = normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
+     sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, newPart);
+   }
+ 
+   @Override
+   public void alterPartitions(String catName, String dbName, String tblName,
 -                              List<List<String>> partValsList, List<Partition> newParts)
++                              List<List<String>> partValsList, List<Partition> newParts,
++                              long writeId, long txnId, String validWriteIds)
+       throws InvalidObjectException, MetaException {
 -    rawStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
++    rawStore.alterPartitions(
++        catName, dbName, tblName, partValsList, newParts, writeId, txnId, validWriteIds);
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tblName = normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return;
+     }
++    // TODO: modify the following method for the case when writeIdList != null.
+     sharedCache.alterPartitionsInCache(catName, dbName, tblName, partValsList, newParts);
+   }
+ 
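+   // Collects the names of all cached partitions of the table and lets expressionProxy prune the
+   // list in place; returns true when the expression could not be evaluated exactly, i.e. the
+   // result may still contain unknown (unmatched) partitions.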
+   private boolean getPartitionNamesPrunedByExprNoTxn(Table table, byte[] expr,
+       String defaultPartName, short maxParts, List<String> result, SharedCache sharedCache)
+       throws MetaException, NoSuchObjectException {
+     List<Partition> parts =
+         sharedCache.listCachedPartitions(StringUtils.normalizeIdentifier(table.getCatName()),
+             StringUtils.normalizeIdentifier(table.getDbName()),
+             StringUtils.normalizeIdentifier(table.getTableName()), maxParts);
+     for (Partition part : parts) {
+       result.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
+     }
+     if (defaultPartName == null || defaultPartName.isEmpty()) {
+       defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
+     }
+     return expressionProxy.filterPartitionsByExpr(table.getPartitionKeys(), expr, defaultPartName,
+         result);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
+       String filter, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
+   }
+ 
+   @Override
+   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
+           result);
+     }
+     List<String> partNames = new LinkedList<>();
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionsByExpr(catName, dbName, tblName, expr, defaultPartitionName, maxParts,
+           result);
+     }
+     boolean hasUnknownPartitions = getPartitionNamesPrunedByExprNoTxn(table, expr,
+         defaultPartitionName, maxParts, partNames, sharedCache);
+     return hasUnknownPartitions;
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException {
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+     }
+     String defaultPartName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
+     List<String> partNames = new LinkedList<>();
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+     }
+     getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, Short.MAX_VALUE, partNames,
+         sharedCache);
+     return partNames.size();
+   }
+ 
+   private static List<String> partNameToVals(String name) {
+     if (name == null) {
+       return null;
+     }
+     List<String> vals = new ArrayList<>();
+     String[] kvp = name.split("/");
+     for (String kv : kvp) {
+       vals.add(FileUtils.unescapePathName(kv.substring(kv.indexOf('=') + 1)));
+     }
+     return vals;
+   }
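+ 
+   // Usage sketch (illustrative only): partNameToVals() recovers the value list from
+   // a name built by Warehouse.makePartName, assuming the standard key=value/key=value
+   // encoding, with FileUtils.unescapePathName undoing the path escaping:
+   //
+   //   partNameToVals("ds=2018-01-01/hr=12")  ->  ["2018-01-01", "12"]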
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+     }
+     List<Partition> partitions = new ArrayList<>();
+     for (String partName : partNames) {
+       Partition part = sharedCache.getPartitionFromCache(catName, dbName, tblName, partNameToVals(partName));
+       if (part != null) {
+         partitions.add(part);
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partVals, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return rawStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType);
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return rawStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType);
+   }
+ 
+   @Override
+   public boolean addRole(String roleName, String ownerName)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.addRole(roleName, ownerName);
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.removeRole(roleName);
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName,
+       PrincipalType principalType, String grantor, PrincipalType grantorType,
+       boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return rawStore.grantRole(role, userName, principalType, grantor, grantorType, grantOption);
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName,
+       PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+     return rawStore.revokeRole(role, userName, principalType, grantOption);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getUserPrivilegeSet(userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getDBPrivilegeSet(catName, dbName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName,
+       String tableName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return rawStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName,
+       String tableName, String partition, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return rawStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName,
+       String tableName, String partitionName, String columnName,
+       String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return rawStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName, columnName, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(
+       String principalName, PrincipalType principalType) {
+     return rawStore.listPrincipalGlobalGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName) {
+     return rawStore.listPrincipalDBGrants(principalName, principalType, catName, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName) {
+     return rawStore.listAllTableGrants(principalName, principalType, catName, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName) {
+     return rawStore.listPrincipalPartitionGrants(principalName, principalType, catName, dbName, tableName, partValues, partName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, String columnName) {
+     return rawStore.listPrincipalTableColumnGrants(principalName, principalType, catName, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName,
+       String columnName) {
+     return rawStore.listPrincipalPartitionColumnGrants(principalName, principalType, catName, dbName, tableName, partValues, partName, columnName);
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.grantPrivileges(privileges);
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.revokePrivileges(privileges, grantOption);
+   }
+ 
+   @Override
+   public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return rawStore.refreshPrivileges(objToRefresh, authorizer, grantPrivileges);
+   }
+ 
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+     return rawStore.getRole(roleName);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+     return rawStore.listRoleNames();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName,
+       PrincipalType principalType) {
+     return rawStore.listRoles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+       PrincipalType principalType) {
+     return rawStore.listRolesWithGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return rawStore.listRoleMembers(roleName);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName, groupNames);
+     }
+     Partition p = sharedCache.getPartitionFromCache(catName, dbName, tblName, partVals);
+     if (p != null) {
+       String partName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
+       PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName,
+           userName, groupNames);
+       p.setPrivileges(privs);
+     }
+     return p;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
+       short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName, groupNames);
+     }
+     List<Partition> partitions = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
+       if (maxParts == -1 || count < maxParts) {
+         String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+         PrincipalPrivilegeSet privs = getPartitionPrivilegeSet(catName, dbName, tblName, partName,
+             userName, groupNames);
+         part.setPrivileges(privs);
+         partitions.add(part);
+         count++;
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+     }
+     List<String> partNames = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
+       boolean psMatch = true;
+       for (int i = 0; i < partVals.size(); i++) {
+         String psVal = partVals.get(i);
+         String partVal = part.getValues().get(i);
+         if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
+           psMatch = false;
+           break;
+         }
+       }
+       if (!psMatch) {
+         continue;
+       }
+       if (maxParts == -1 || count < maxParts) {
+         partNames.add(Warehouse.makePartName(table.getPartitionKeys(), part.getValues()));
+         count++;
+       }
+     }
+     return partNames;
+   }
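+ 
+   // Usage sketch (illustrative only): in the partial-spec match above, a null or
+   // empty value acts as a wildcard for that position. For a table partitioned by
+   // (year, month), the hypothetical call
+   //
+   //   listPartitionNamesPs(catName, dbName, tblName, Arrays.asList("2018", ""), (short) -1);
+   //
+   // returns every cached partition whose first value is "2018", regardless of month.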
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
+       return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName,
+           groupNames);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+     if (table == null) {
+       // The table is not yet loaded in cache
+       return rawStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts, userName,
+           groupNames);
+     }
+     List<Partition> partitions = new ArrayList<>();
+     int count = 0;
+     for (Partition part : sharedCache.listCachedPartitions(catName, dbName, tblName, maxParts)) {
+       boolean psMatch = true;
+       for (int i = 0; i < partVals.size(); i++) {
+         String psVal = partVals.get(i);
+         String partVal = part.getValues().get(i);
+         if (psVal != null && !psVal.isEmpty() && !psVal.equals(partVal)) {
+           psMatch = false;
+           break;
+         }
+       }
+       if (!psMatch) {
+         continue;
+       }
+       if (maxParts == -1 || count < maxParts) {
+         String partName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+         PrincipalPrivilegeSet privs =
+             getPartitionPrivilegeSet(catName, dbName, tblName, partName, userName, groupNames);
+         part.setPrivileges(privs);
+         partitions.add(part);
+         count++;
+       }
+     }
+     return partitions;
+   }
+ 
+   @Override
+   public boolean updateTableColumnStatistics(ColumnStatistics colStats)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.updateTableColumnStatistics(colStats);
+     if (succ) {
+       String catName = colStats.getStatsDesc().isSetCatName() ?
+           normalizeIdentifier(colStats.getStatsDesc().getCatName()) :
+           getDefaultCatalog(conf);
+       String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
+       String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+       if (table == null) {
+         // The table is not yet loaded in cache
+         return succ;
+       }
+       List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+       List<String> colNames = new ArrayList<>();
+       for (ColumnStatisticsObj statsObj : statsObjs) {
+         colNames.add(statsObj.getColName());
+       }
+       StatsSetupConst.setColumnStatsState(table.getParameters(), colNames);
+       sharedCache.alterTableInCache(catName, dbName, tblName, table);
+       sharedCache.updateTableColStatsInCache(catName, dbName, tblName, statsObjs);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tblName,
+       List<String> colNames) throws MetaException, NoSuchObjectException {
++    return getTableColumnStatistics(catName, dbName, tblName, colNames, -1, null);
++  }
++
++  // TODO: handle txnId/writeIdList the same way as getTable() does.
++  @Override
++  public ColumnStatistics getTableColumnStatistics(
++      String catName, String dbName, String tblName, List<String> colNames,
++      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
+     catName = StringUtils.normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
++      return rawStore.getTableColumnStatistics(
++          catName, dbName, tblName, colNames, txnId, writeIdList);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
 -    if (table == null) {
++    if (table == null || writeIdList != null) {
+       // Either the table is not yet loaded in cache, or a transactional snapshot
+       // (writeIdList) was requested, which the cache cannot serve yet.
 -      return rawStore.getTableColumnStatistics(catName, dbName, tblName, colNames);
++      return rawStore.getTableColumnStatistics(
++          catName, dbName, tblName, colNames, txnId, writeIdList);
+     }
+     ColumnStatisticsDesc csd = new ColumnStatisticsDesc(true, dbName, tblName);
+     List<ColumnStatisticsObj> colStatObjs =
+         sharedCache.getTableColStatsFromCache(catName, dbName, tblName, colNames);
+     return new ColumnStatistics(csd, colStatObjs);
+   }
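+ 
+   // A sketch of the read-path discipline used above and throughout this class
+   // (illustrative only): serve from sharedCache only when the table is configured
+   // for caching, already loaded, and no transactional snapshot is requested;
+   // otherwise delegate to rawStore.
+   //
+   //   if (!shouldCacheTable(catName, dbName, tblName)) { return rawStore...; }
+   //   Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
+   //   if (table == null || writeIdList != null) { return rawStore...; }
+   //   // ... answer from sharedCache ...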
+ 
+   @Override
+   public boolean deleteTableColumnStatistics(String catName, String dbName, String tblName,
+                                              String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, tblName, colName);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removeTableColStatsFromCache(catName, dbName, tblName, colName);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public boolean updatePartitionColumnStatistics(ColumnStatistics colStats, List<String> partVals)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     boolean succ = rawStore.updatePartitionColumnStatistics(colStats, partVals);
+     if (succ) {
+       String catName = colStats.getStatsDesc().isSetCatName() ?
+           normalizeIdentifier(colStats.getStatsDesc().getCatName()) : DEFAULT_CATALOG_NAME;
+       String dbName = normalizeIdentifier(colStats.getStatsDesc().getDbName());
+       String tblName = normalizeIdentifier(colStats.getStatsDesc().getTableName());
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+       Partition part = getPartition(catName, dbName, tblName, partVals);
+       List<String> colNames = new ArrayList<>();
+       for (ColumnStatisticsObj statsObj : statsObjs) {
+         colNames.add(statsObj.getColName());
+       }
+       StatsSetupConst.setColumnStatsState(part.getParameters(), colNames);
+       sharedCache.alterPartitionInCache(catName, dbName, tblName, partVals, part);
+       sharedCache.updatePartitionColStatsInCache(catName, dbName, tblName, partVals, colStats.getStatsObj());
+     }
+     return succ;
+   }
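+ 
+   // A sketch of the write-path discipline used above (illustrative only): mutate
+   // rawStore first and mirror the change into sharedCache only on success, so the
+   // cache never holds a write that the backing store rejected.
+   //
+   //   boolean succ = rawStore.update...(...);
+   //   if (succ && shouldCacheTable(catName, dbName, tblName)) {
+   //     sharedCache.update...InCache(...);
+   //   }
+   //   return succ;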
+ 
+   @Override
+   // TODO: calculate from cached values.
+   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName, String tblName,
+       List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
+     return rawStore.getPartitionColumnStatistics(catName, dbName, tblName, partNames, colNames);
+   }
+ 
+   @Override
++  public List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return rawStore.getPartitionColumnStatistics(
++        catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tblName, String partName,
+       List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     boolean succ =
+         rawStore.deletePartitionColumnStatistics(catName, dbName, tblName, partName, partVals, colName);
+     if (succ) {
+       catName = normalizeIdentifier(catName);
+       dbName = normalizeIdentifier(dbName);
+       tblName = normalizeIdentifier(tblName);
+       if (!shouldCacheTable(catName, dbName, tblName)) {
+         return succ;
+       }
+       sharedCache.removePartitionColStatsFromCache(catName, dbName, tblName, partVals, colName);
+     }
+     return succ;
+   }
+ 
+   @Override
+   public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName, List<String> partNames,
+       List<String> colNames) throws MetaException, NoSuchObjectException {
++    return get_aggr_stats_for(catName, dbName, tblName, partNames, colNames, -1, null);
++  }
++
++  @Override
++  // TODO: handle transactional stats the same way as getTable() does.
++  public AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
++                                      List<String> partNames, List<String> colNames,
++                                      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
+     List<ColumnStatisticsObj> colStats;
+     catName = normalizeIdentifier(catName);
+     dbName = StringUtils.normalizeIdentifier(dbName);
+     tblName = StringUtils.normalizeIdentifier(tblName);
+     if (!shouldCacheTable(catName, dbName, tblName)) {
 -      rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
++      return rawStore.get_aggr_stats_for(
++          catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+     }
+     Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
 -    if (table == null) {
++    if (table == null || writeIdList != null) {
+       // Either the table is not yet loaded in cache, or a transactional snapshot
+       // (writeIdList) was requested, which the cache cannot serve yet.
 -      return rawStore.get_aggr_stats_for(catName, dbName, tblName, partNames, colNames);
++      return rawStore.get_aggr_stats_for(
++          catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
+     }
+     List<String> allPartNames = rawStore.listPartitionNames(catName, dbName, tblName, (short) -1);
+     if (partNames.size() == allPartNames.size()) {
+       colStats = sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALL);
+       if (colStats != null) {
+         return new AggrStats(colStats, partNames.size());
+       }
+     } else if (partNames.size() == (allPartNames.size() - 1)) {
+       String defaultPartitionName = MetastoreConf.getVar(getConf(), ConfVars.DEFAULTPARTITIONNAME);
+       if (!partNames.contains(defaultPartitionName)) {
+         colStats =
+             sharedCache.getAggrStatsFromCache(catName, dbName, tblName, colNames, StatsType.ALLBUTDEFAULT);
+         if (colStats != null) {
+           return new AggrStats(colStats, partNames.size());
+         }
+       }
+     }
+     LOG.debug("Didn't find aggr stats in cache. Merging them. tblName= {}, parts= {}, cols= {}",
+         tblName, partNames, colNames);
+     MergedColumnStatsForPartitions mergedColStats =
+         mergeColStatsForPartitions(catName, dbName, tblName, partNames, colNames, sharedCache);
+     return new AggrStats(mergedColStats.getColStats(), mergedColStats.getPartsFound());
+   }
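+ 
+   // Worked example for the aggregate selection above (illustrative only): suppose a
+   // table has partitions {p1, p2, p3, __HIVE_DEFAULT_PARTITION__}, where the last
+   // name comes from ConfVars.DEFAULTPARTITIONNAME. Then:
+   //   - requesting all four partitions consults the StatsType.ALL aggregate;
+   //   - requesting exactly {p1, p2, p3} consults StatsType.ALLBUTDEFAULT;
+   //   - any other subset falls through to mergeColStatsForPartitions() below.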
+ 
+   private MergedColumnStatsForPartitions mergeColStatsForPartitions(
+       String catName, String dbName, String tblName, List<String> partNames, List<String> colNames,
+       SharedCache sharedCache) throws MetaException {
+     final boolean useDensityFunctionForNDVEstimation =
+         MetastoreConf.getBoolVar(getConf(), ConfVars.STATS_NDV_DENSITY_FUNCTION);
+     final double ndvTuner = MetastoreConf.getDoubleVar(getConf(), ConfVars.STATS_NDV_TUNER);
+     Map<ColumnStatsAggregator, List<ColStatsObjWithSourceInfo>> colStatsMap = new HashMap<>();
+     boolean areAllPartsFound = true;
+     long partsFound = 0;
+     for (String colName : colNames) {
+       long partsFoundForColumn = 0;
+       ColumnStatsAggregator colStatsAggregator = null;
+       List<ColStatsObjWithSourceInfo> colStatsWithPartInfoList = new ArrayList<>();
+       for (String partName : partNames) {
+         ColumnStatisticsObj colStatsForPart =
+             sharedCache.getPartitionColStatsFromCache(catName, dbName, tblName, partNameToVals(partName), colName);
+         if (colStatsForPart != null) {
+           ColStatsObjWithSourceInfo colStatsWithPartInfo =
+               new ColStatsObjWithSourceInfo(colStatsForPart, catName, dbName, tblName, partName);
+           colStatsWithPartInfoList.add(colStatsWithPartInfo);
+           if (colStatsAggregator == null) {
+             colStatsAggregator = ColumnStatsAggregatorFactory.getColumnStatsAggregator(
+                 colStatsForPart.getStatsData().getSetField(), useDensityFunctionForNDVEstimation,
+                 ndvTuner);
+           }
+           partsFoundForColumn++;
+         } else {
+           LOG.debug(
+               "Stats not found in CachedStore for: dbName={} tblName={} partName={} colName={}",
+               dbName, tblName, partName, colName);
+         }
+       }
+       if (colStatsWithPartInfoList.size() > 0) {
+         colStatsMap.put(colStatsAggregator, colStatsWithPartInfoList);
+       }
+       if (partsFoundForColumn == partNames.size()) {
+         partsFound++;
+       }
+       if (colStatsMap.size() < 1) {
+         LOG.debug("No stats data found for: dbName={} tblName= {} partNames= {} colNames= ", dbName,
+             tblName, partNames, colNames);
+         return new MergedColumnStatsForPartitions(new ArrayList<ColumnStatisticsObj>(), 0);
+       }
+     }
+     // Note that enableBitVector does not apply here: each ColumnStatisticsObj already
+     // records whether its bit vector is null, so the aggregation logic adapts automatically.
+     return new MergedColumnStatsForPartitions(MetaStoreUtils.aggrPartitionStats(colStatsMap,
+         partNames, areAllPartsFound, useDensityFunctionForNDVEstimation, ndvTuner), partsFound);
+   }
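+ 
+   // Worked example for the merge above (illustrative only): with
+   // partNames = [p1, p2] and colNames = [c1], if the cache holds stats for
+   // (p1, c1) but not (p2, c1), then partsFoundForColumn ends at 1 != 2, so
+   // partsFound stays 0 and the c1 aggregate is computed from p1's stats alone.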
+ 
+   class MergedColumnStatsForPartitions {
+     List<ColumnStatisticsObj> colStats = new ArrayList<>();
+     long partsFound;
+ 
+     MergedColumnStatsForPartitions(List<ColumnStatisticsObj> colStats, long partsFound) {
+       this.colStats = colStats;
+       this.partsFound = partsFound;
+     }
+ 
+     List<ColumnStatisticsObj> getColStats() {
+       return colStats;
+     }
+ 
+     long getPartsFound() {
+       return partsFound;
+     }
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+     return rawStore.cleanupEvents();
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return rawStore.addToken(tokenIdentifier, delegationToken);
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return rawStore.removeToken(tokenIdentifier);
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return rawStore.getToken(tokenIdentifier);
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+     return rawStore.getAllTokenIdentifiers();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) throws MetaException {
+     return rawStore.addMasterKey(key);
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key)
+       throws NoSuchObjectException, MetaException {
+     rawStore.updateMasterKey(seqNo, key);
+   }
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) {
+     return rawStore.removeMasterKey(keySeq);
+   }
+ 
+   @Override
+   public String[] getMasterKeys() {
+     return rawStore.getMasterKeys();
+   }
+ 
+   @Overri

<TRUNCATED>

[56/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index 0000000,9b79446..2587a98
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@@ -1,0 -1,1212 +1,1247 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.ISchemaName;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+ 
+ import java.nio.ByteBuffer;
+ import java.util.Collections;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.ISchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMTrigger;
+ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+ import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.Type;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+ import org.apache.hadoop.hive.metastore.api.WMMapping;
+ import org.apache.hadoop.hive.metastore.api.WMPool;
+ import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.thrift.TException;
+ import org.junit.Assert;
+ 
+ /**
+  *
+  * DummyRawStoreForJdoConnection.
+  *
+  * An implementation of RawStore that verifies the DummyJdoConnectionUrlHook has already been
+  * applied when this class's setConf method is called, by checking that the value of the
+  * CONNECT_URL_KEY ConfVar has been updated.
+  *
+  * All non-void methods return default values.
+  */
+ public class DummyRawStoreForJdoConnection implements RawStore {
+ 
+   @Override
+   public Configuration getConf() {
+ 
+     return null;
+   }
+ 
+   @Override
+   public void setConf(Configuration arg0) {
+     String expected = DummyJdoConnectionUrlHook.newUrl;
+     String actual = MetastoreConf.getVar(arg0, MetastoreConf.ConfVars.CONNECT_URL_KEY);
+ 
+     Assert.assertEquals("The expected URL used by JDO to connect to the metastore: " + expected +
+         " did not match the actual value when the Raw Store was initialized: " + actual,
+         expected, actual);
+   }
+ 
+   @Override
+   public void shutdown() {
+ 
+ 
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     return false;
+   }
+ 
+   @Override
+   public boolean isActiveTransaction() {
+     return false;
+   }
+ 
+   @Override
+   public void rollbackTransaction() {
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+ 
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat) throws MetaException,
+       InvalidOperationException {
+ 
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+ 
+ 
+   }
+ 
+   @Override
+   public Database getDatabase(String catName, String name) throws NoSuchObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbname) throws NoSuchObjectException, MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean alterDatabase(String catName, String dbname, Database db) throws NoSuchObjectException,
+       MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+ 
+     return false;
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+ 
+     return null;
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+ 
+     return false;
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+ 
+ 
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tableName) throws MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
++  public Table getTable(String catalogName, String dbName, String tableName,
++                        long txnid, String writeIdList) throws MetaException {
++    return null;
++  }
++
++  @Override
+   public boolean addPartition(Partition part) throws InvalidObjectException, MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals)
+       throws MetaException, NoSuchObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
++  public Partition getPartition(String catName, String dbName, String tableName, List<String> part_vals,
++                                long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public boolean dropPartition(String catName, String dbName, String tableName, List<String> part_vals)
+       throws MetaException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
+       throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max) {
+     return Collections.emptyMap();
+   }
+ 
+   @Override
 -  public void alterTable(String catName, String dbname, String name, Table newTable)
++  public void alterTable(String catName, String dbname, String name, Table newTable, long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException {
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames, List<String> tableTypes)
+       throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbname, List<String> tableNames)
+       throws MetaException, UnknownDBException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter, short max_tables)
+       throws MetaException, UnknownDBException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String db_name, String tbl_name, short max_parts)
+       throws MetaException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(String catName, String db_name,
+                                                      String tbl_name, List<FieldSchema> cols,
+                                                      boolean applyDistinct, String filter,
+                                                      boolean ascending, List<FieldSchema> order,
+                                                      long maxParts) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
 -      Partition new_part) throws InvalidObjectException, MetaException {
++      Partition new_part, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public void alterPartitions(String catName, String db_name, String tbl_name,
 -                              List<List<String>> part_vals_list, List<Partition> new_parts)
 -      throws InvalidObjectException, MetaException {
 -
 -
++                              List<List<String>> part_vals_list, List<Partition> new_parts,
++                              long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
+                                                String filter, short maxParts)
+       throws MetaException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+     return false;
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+     throws MetaException, NoSuchObjectException {
+     return -1;
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException {
+     return -1;
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String catName, String dbName, String tblName, Map<String, String> partVals,
+       PartitionEventType evtType) throws MetaException, UnknownTableException,
+       InvalidPartitionException, UnknownPartitionException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType) throws MetaException,
+       UnknownTableException, InvalidPartitionException, UnknownPartitionException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean addRole(String roleName, String ownerName) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName) throws MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName, PrincipalType principalType, String grantor,
+       PrincipalType grantorType, boolean grantOption) throws MetaException, NoSuchObjectException,
+       InvalidObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName,
+       String userName, List<String> groupNames) throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName,
+       String partition, String userName, List<String> groupNames) throws InvalidObjectException,
+       MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName,
+       String partitionName, String columnName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+       PrincipalType principalType) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, List<String> partValues,
+       String partName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, String columnName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, List<String> partVals,
+       String partName, String columnName) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+ 
+     return false;
+   }
+ 
+   @Override
+   public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return false;
+   }
+ 
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName, PrincipalType principalType) {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+                                                       PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return null;
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String catName, String dbName, String tblName, List<String> partVals,
+       String user_name, List<String> group_names) throws MetaException, NoSuchObjectException,
+       InvalidObjectException {
+ 
+     return null;
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName, short maxParts,
+       String userName, List<String> groupNames) throws MetaException, NoSuchObjectException,
+       InvalidObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String catName, String db_name, String tbl_name, List<String> part_vals,
+       short max_parts) throws MetaException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
+       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+ 
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+ 
+     return 0;
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return false;
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return false;
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return null;
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) {
+     return 0;
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key) {
+   }
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) {
+     return false;
+   }
+ 
+   @Override
+   public String[] getMasterKeys() {
+     return new String[0];
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String catName, String dbName, String tableName, String partitionName, String columnName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionGrantsAll(String catName, String dbName, String tableName, String partitionName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName, String columnName) {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+       List<String> colName) throws MetaException, NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
++  public ColumnStatistics getTableColumnStatistics(
++      String catName, String dbName, String tableName, List<String> colName,
++      long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
+                                              String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException {
+     return false;
+   }
+ 
+ 
+   @Override
+   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
+     String partName, List<String> partVals, String colName)
+     throws NoSuchObjectException, MetaException, InvalidObjectException,
+     InvalidInputException {
+     return false;
+ 
+   }
+ 
+   @Override
+   public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+       throws NoSuchObjectException, MetaException, InvalidObjectException {
+     return false;
+   }
+ 
+   @Override
+   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj, List<String> partVals)
+     throws NoSuchObjectException, MetaException, InvalidObjectException {
+     return false;
+   }
+ 
+   @Override
+   public void verifySchema() throws MetaException {
+   }
+ 
+   @Override
+   public String getMetaStoreSchemaVersion() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void setMetaStoreSchemaVersion(String version, String comment) throws MetaException {
+   }
+ 
+   @Override
+   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
+       String tblName, List<String> colNames, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
++  public List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return Collections.emptyList();
++  }
++
++  @Override
+   public boolean doesPartitionExist(String catName, String dbName, String tableName,
+       List<FieldSchema> partKeys, List<String> partVals)
+       throws MetaException, NoSuchObjectException {
+     return false;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException {
+     return false;
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+     return false;
+   }
+ 
+   @Override
+   public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames) {
+   }
+ 
+   @Override
+   public void createFunction(Function func) throws InvalidObjectException,
+       MetaException {
+   }
+ 
+   @Override
+   public void alterFunction(String catName, String dbName, String funcName, Function newFunction)
+       throws InvalidObjectException, MetaException {
+   }
+ 
+   @Override
+   public void dropFunction(String catName, String dbName, String funcName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException {
+   }
+ 
+   @Override
+   public Function getFunction(String catName, String dbName, String funcName)
+       throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<Function> getAllFunctions(String catName)
+           throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getFunctions(String catName, String dbName, String pattern)
+       throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public AggrStats get_aggr_stats_for(String catName, String dbName,
+       String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException {
+     return null;
+   }
+ 
+   @Override
++  public AggrStats get_aggr_stats_for(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnid, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+     return null;
+   }
+ 
+   @Override
+   public void addNotificationEvent(NotificationEvent event) throws MetaException {
+ 
+   }
+ 
+   @Override
+   public void cleanNotificationEvents(int olderThan) {
+ 
+   }
+ 
+   @Override
+   public CurrentNotificationEventId getCurrentNotificationEventId() {
+     return null;
+   }
+ 
+   @Override
+   public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
+     return null;
+   }
+ 
+   @Override
+   public void flushCache() {
+ 
+   }
+ 
+   @Override
+   public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
+     return null;
+   }
+ 
+   @Override
+   public void putFileMetadata(
+       List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
+   }
+ 
+   @Override
+   public boolean isFileMetadataSupported() {
+     return false;
+   }
+ 
+   @Override
+   public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+       ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+     return null;
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
+     String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void dropConstraint(String catName, String dbName, String tableName,
+     String constraintName, boolean missingOk) throws NoSuchObjectException {
+     // TODO Auto-generated method stub
+   }
+ 
+   @Override
+   public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addForeignKeys(List<SQLForeignKey> fks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addCheckConstraints(List<SQLCheckConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public String getMetastoreDbUuid() throws MetaException {
+     throw new MetaException("Get metastore uuid is not implemented");
+   }
+ 
+   @Override
+   public void createResourcePlan(
+       WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize) throws MetaException {
+   }
+ 
+   @Override
+   public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public WMFullResourcePlan alterResourcePlan(
+       String name, WMNullableResourcePlan resourcePlan, boolean canActivateDisabled, boolean canDeactivate,
+       boolean isReplace)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public WMValidateResourcePlanResponse validateResourcePlan(String name)
+       throws NoSuchObjectException, InvalidObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+   }
+ 
+   @Override
+   public void createWMTrigger(WMTrigger trigger) throws MetaException {
+   }
+ 
+   @Override
+   public void alterWMTrigger(WMTrigger trigger)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void dropWMTrigger(String resourcePlanName, String triggerName)
+       throws NoSuchObjectException, MetaException {
+   }
+ 
+   @Override
+   public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+       throws NoSuchObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException,
+       NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void dropWMPool(String resourcePlanName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+       MetaException {
+   }
+ 
+   @Override
+   public void dropWMMapping(WMMapping mapping)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+   }
+ 
+   @Override
+   public List<MetaStoreUtils.ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException,
+       MetaException {
+ 
+   }
+ 
+   @Override
+   public ISchema getISchema(ISchemaName schemaName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void addSchemaVersion(SchemaVersion schemaVersion) throws
+       AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion) throws
+       NoSuchObjectException, MetaException {
+ 
+   }
+ 
+   @Override
+   public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<SchemaVersion> getAllSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                         String type) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException,
+       MetaException {
+ 
+   }
+ 
+   @Override
+   public SerDeInfo getSerDeInfo(String serDeName) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+ 
+   }
+ 
+   @Override
+   public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+   }
+ 
+   @Override
+   public List<RuntimeStat> getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public int deleteRuntimeStats(int maxRetainSecs) throws MetaException {
+     return 0;
+   }
+ 
+   @Override
+   public List<TableName> getTableNamesWithStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public List<TableName> getAllTableNamesForStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public Map<String, List<String>> getPartitionColsWithStats(String catName,
+       String dbName, String tableName) throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public void cleanWriteNotificationEvents(int olderThan) {
+   }
+ 
+   @Override
+   public List<WriteEventInfo> getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException {
+     return null;
+   }
+ }
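
A minimal usage sketch (not part of the patch): the @Override annotations above suggest this no-op stub implements org.apache.hadoop.hive.metastore.RawStore, so the new write-id-aware overload added in this change can be exercised as below. All names and values here (class name, catalog/db/table, txnid, writeIdList) are illustrative, not taken from the patch.

  import java.util.Arrays;
  import org.apache.hadoop.hive.metastore.RawStore;
  import org.apache.hadoop.hive.metastore.api.ColumnStatistics;

  class TxnStatsLookup {
    // Fetches table-level column stats that are valid for the caller's
    // transactional snapshot; the no-op stub above always returns null.
    static ColumnStatistics lookup(RawStore store) throws Exception {
      long txnid = 42L;                          // illustrative transaction id
      String writeIdList = "<validWriteIdList>"; // illustrative snapshot string
      return store.getTableColumnStatistics(
          "hive", "default", "web_logs",         // catalog, db, table (made up)
          Arrays.asList("id", "name"),           // columns to fetch stats for
          txnid, writeIdList);
    }
  }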


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java
new file mode 100644
index 0000000..44a57f2
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsDesc.java
@@ -0,0 +1,904 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ColumnStatisticsDesc implements org.apache.thrift.TBase<ColumnStatisticsDesc, ColumnStatisticsDesc._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnStatisticsDesc> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatisticsDesc");
+
+  private static final org.apache.thrift.protocol.TField IS_TBL_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("isTblLevel", org.apache.thrift.protocol.TType.BOOL, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partName", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField LAST_ANALYZED_FIELD_DESC = new org.apache.thrift.protocol.TField("lastAnalyzed", org.apache.thrift.protocol.TType.I64, (short)5);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ColumnStatisticsDescStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ColumnStatisticsDescTupleSchemeFactory());
+  }
+
+  private boolean isTblLevel; // required
+  private String dbName; // required
+  private String tableName; // required
+  private String partName; // optional
+  private long lastAnalyzed; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    IS_TBL_LEVEL((short)1, "isTblLevel"),
+    DB_NAME((short)2, "dbName"),
+    TABLE_NAME((short)3, "tableName"),
+    PART_NAME((short)4, "partName"),
+    LAST_ANALYZED((short)5, "lastAnalyzed"),
+    CAT_NAME((short)6, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // IS_TBL_LEVEL
+          return IS_TBL_LEVEL;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // TABLE_NAME
+          return TABLE_NAME;
+        case 4: // PART_NAME
+          return PART_NAME;
+        case 5: // LAST_ANALYZED
+          return LAST_ANALYZED;
+        case 6: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ISTBLLEVEL_ISSET_ID = 0;
+  private static final int __LASTANALYZED_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.PART_NAME,_Fields.LAST_ANALYZED,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.IS_TBL_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("isTblLevel", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PART_NAME, new org.apache.thrift.meta_data.FieldMetaData("partName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.LAST_ANALYZED, new org.apache.thrift.meta_data.FieldMetaData("lastAnalyzed", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatisticsDesc.class, metaDataMap);
+  }
+
+  public ColumnStatisticsDesc() {
+  }
+
+  public ColumnStatisticsDesc(
+    boolean isTblLevel,
+    String dbName,
+    String tableName)
+  {
+    this();
+    this.isTblLevel = isTblLevel;
+    setIsTblLevelIsSet(true);
+    this.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(dbName);
+    this.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(tableName);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ColumnStatisticsDesc(ColumnStatisticsDesc other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.isTblLevel = other.isTblLevel;
+    if (other.isSetDbName()) {
+      this.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.dbName);
+    }
+    if (other.isSetTableName()) {
+      this.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.tableName);
+    }
+    if (other.isSetPartName()) {
+      this.partName = other.partName;
+    }
+    this.lastAnalyzed = other.lastAnalyzed;
+    if (other.isSetCatName()) {
+      this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.catName);
+    }
+  }
+
+  public ColumnStatisticsDesc deepCopy() {
+    return new ColumnStatisticsDesc(this);
+  }
+
+  @Override
+  public void clear() {
+    setIsTblLevelIsSet(false);
+    this.isTblLevel = false;
+    this.dbName = null;
+    this.tableName = null;
+    this.partName = null;
+    setLastAnalyzedIsSet(false);
+    this.lastAnalyzed = 0;
+    this.catName = null;
+  }
+
+  public boolean isIsTblLevel() {
+    return this.isTblLevel;
+  }
+
+  public void setIsTblLevel(boolean isTblLevel) {
+    this.isTblLevel = isTblLevel;
+    setIsTblLevelIsSet(true);
+  }
+
+  public void unsetIsTblLevel() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISTBLLEVEL_ISSET_ID);
+  }
+
+  /** Returns true if field isTblLevel is set (has been assigned a value) and false otherwise */
+  public boolean isSetIsTblLevel() {
+    return EncodingUtils.testBit(__isset_bitfield, __ISTBLLEVEL_ISSET_ID);
+  }
+
+  public void setIsTblLevelIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISTBLLEVEL_ISSET_ID, value);
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(dbName);
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(tableName);
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  public String getPartName() {
+    return this.partName;
+  }
+
+  public void setPartName(String partName) {
+    this.partName = partName;
+  }
+
+  public void unsetPartName() {
+    this.partName = null;
+  }
+
+  /** Returns true if field partName is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartName() {
+    return this.partName != null;
+  }
+
+  public void setPartNameIsSet(boolean value) {
+    if (!value) {
+      this.partName = null;
+    }
+  }
+
+  public long getLastAnalyzed() {
+    return this.lastAnalyzed;
+  }
+
+  public void setLastAnalyzed(long lastAnalyzed) {
+    this.lastAnalyzed = lastAnalyzed;
+    setLastAnalyzedIsSet(true);
+  }
+
+  public void unsetLastAnalyzed() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LASTANALYZED_ISSET_ID);
+  }
+
+  /** Returns true if field lastAnalyzed is set (has been assigned a value) and false otherwise */
+  public boolean isSetLastAnalyzed() {
+    return EncodingUtils.testBit(__isset_bitfield, __LASTANALYZED_ISSET_ID);
+  }
+
+  public void setLastAnalyzedIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LASTANALYZED_ISSET_ID, value);
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(catName);
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case IS_TBL_LEVEL:
+      if (value == null) {
+        unsetIsTblLevel();
+      } else {
+        setIsTblLevel((Boolean)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case PART_NAME:
+      if (value == null) {
+        unsetPartName();
+      } else {
+        setPartName((String)value);
+      }
+      break;
+
+    case LAST_ANALYZED:
+      if (value == null) {
+        unsetLastAnalyzed();
+      } else {
+        setLastAnalyzed((Long)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case IS_TBL_LEVEL:
+      return isIsTblLevel();
+
+    case DB_NAME:
+      return getDbName();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case PART_NAME:
+      return getPartName();
+
+    case LAST_ANALYZED:
+      return getLastAnalyzed();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case IS_TBL_LEVEL:
+      return isSetIsTblLevel();
+    case DB_NAME:
+      return isSetDbName();
+    case TABLE_NAME:
+      return isSetTableName();
+    case PART_NAME:
+      return isSetPartName();
+    case LAST_ANALYZED:
+      return isSetLastAnalyzed();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ColumnStatisticsDesc)
+      return this.equals((ColumnStatisticsDesc)that);
+    return false;
+  }
+
+  public boolean equals(ColumnStatisticsDesc that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_isTblLevel = true;
+    boolean that_present_isTblLevel = true;
+    if (this_present_isTblLevel || that_present_isTblLevel) {
+      if (!(this_present_isTblLevel && that_present_isTblLevel))
+        return false;
+      if (this.isTblLevel != that.isTblLevel)
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_partName = true && this.isSetPartName();
+    boolean that_present_partName = true && that.isSetPartName();
+    if (this_present_partName || that_present_partName) {
+      if (!(this_present_partName && that_present_partName))
+        return false;
+      if (!this.partName.equals(that.partName))
+        return false;
+    }
+
+    boolean this_present_lastAnalyzed = true && this.isSetLastAnalyzed();
+    boolean that_present_lastAnalyzed = true && that.isSetLastAnalyzed();
+    if (this_present_lastAnalyzed || that_present_lastAnalyzed) {
+      if (!(this_present_lastAnalyzed && that_present_lastAnalyzed))
+        return false;
+      if (this.lastAnalyzed != that.lastAnalyzed)
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_isTblLevel = true;
+    list.add(present_isTblLevel);
+    if (present_isTblLevel)
+      list.add(isTblLevel);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tableName = true && (isSetTableName());
+    list.add(present_tableName);
+    if (present_tableName)
+      list.add(tableName);
+
+    boolean present_partName = true && (isSetPartName());
+    list.add(present_partName);
+    if (present_partName)
+      list.add(partName);
+
+    boolean present_lastAnalyzed = true && (isSetLastAnalyzed());
+    list.add(present_lastAnalyzed);
+    if (present_lastAnalyzed)
+      list.add(lastAnalyzed);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ColumnStatisticsDesc other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetIsTblLevel()).compareTo(other.isSetIsTblLevel());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIsTblLevel()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isTblLevel, other.isTblLevel);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartName()).compareTo(other.isSetPartName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partName, other.partName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetLastAnalyzed()).compareTo(other.isSetLastAnalyzed());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLastAnalyzed()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lastAnalyzed, other.lastAnalyzed);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ColumnStatisticsDesc(");
+    boolean first = true;
+
+    sb.append("isTblLevel:");
+    sb.append(this.isTblLevel);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tableName:");
+    if (this.tableName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableName);
+    }
+    first = false;
+    if (isSetPartName()) {
+      if (!first) sb.append(", ");
+      sb.append("partName:");
+      if (this.partName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partName);
+      }
+      first = false;
+    }
+    if (isSetLastAnalyzed()) {
+      if (!first) sb.append(", ");
+      sb.append("lastAnalyzed:");
+      sb.append(this.lastAnalyzed);
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetIsTblLevel()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'isTblLevel' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTableName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ColumnStatisticsDescStandardSchemeFactory implements SchemeFactory {
+    public ColumnStatisticsDescStandardScheme getScheme() {
+      return new ColumnStatisticsDescStandardScheme();
+    }
+  }
+
+  private static class ColumnStatisticsDescStandardScheme extends StandardScheme<ColumnStatisticsDesc> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatisticsDesc struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // IS_TBL_LEVEL
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.isTblLevel = iprot.readBool();
+              struct.setIsTblLevelIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // PART_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.partName = iprot.readString();
+              struct.setPartNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // LAST_ANALYZED
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.lastAnalyzed = iprot.readI64();
+              struct.setLastAnalyzedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatisticsDesc struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(IS_TBL_LEVEL_FIELD_DESC);
+      oprot.writeBool(struct.isTblLevel);
+      oprot.writeFieldEnd();
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tableName != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.tableName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.partName != null) {
+        if (struct.isSetPartName()) {
+          oprot.writeFieldBegin(PART_NAME_FIELD_DESC);
+          oprot.writeString(struct.partName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetLastAnalyzed()) {
+        oprot.writeFieldBegin(LAST_ANALYZED_FIELD_DESC);
+        oprot.writeI64(struct.lastAnalyzed);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ColumnStatisticsDescTupleSchemeFactory implements SchemeFactory {
+    public ColumnStatisticsDescTupleScheme getScheme() {
+      return new ColumnStatisticsDescTupleScheme();
+    }
+  }
+
+  private static class ColumnStatisticsDescTupleScheme extends TupleScheme<ColumnStatisticsDesc> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsDesc struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeBool(struct.isTblLevel);
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tableName);
+      BitSet optionals = new BitSet();
+      if (struct.isSetPartName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetLastAnalyzed()) {
+        optionals.set(1);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetPartName()) {
+        oprot.writeString(struct.partName);
+      }
+      if (struct.isSetLastAnalyzed()) {
+        oprot.writeI64(struct.lastAnalyzed);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsDesc struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.isTblLevel = iprot.readBool();
+      struct.setIsTblLevelIsSet(true);
+      struct.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+      struct.setDbNameIsSet(true);
+      struct.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+      struct.setTableNameIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.partName = iprot.readString();
+        struct.setPartNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.lastAnalyzed = iprot.readI64();
+        struct.setLastAnalyzedIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
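
A quick usage sketch for the generated struct above (not part of the patch; the db/table values are illustrative): the three-argument constructor covers the required fields, the optional fields are populated through their setters, and validate() enforces the REQUIRED metadata before serialization. TSerializer with TCompactProtocol mirrors what the private writeObject() above does for Java serialization.

  import org.apache.thrift.TSerializer;
  import org.apache.thrift.protocol.TCompactProtocol;
  import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;

  public class ColumnStatisticsDescDemo {
    public static void main(String[] args) throws Exception {
      // Required fields (isTblLevel, dbName, tableName) via the convenience ctor.
      ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, "default", "web_logs");
      desc.setCatName("hive");           // optional catalog name (field 6)
      desc.setLastAnalyzed(1531440000L); // optional epoch timestamp (field 5)
      desc.validate();                   // throws if any required field is unset
      System.out.println(desc);          // optional fields print only when set

      // Same compact encoding the writeObject() hook above uses.
      byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(desc);
      System.out.println(bytes.length + " bytes");
    }
  }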

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java
new file mode 100644
index 0000000..6f9a57f
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsObj.java
@@ -0,0 +1,593 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ColumnStatisticsObj implements org.apache.thrift.TBase<ColumnStatisticsObj, ColumnStatisticsObj._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnStatisticsObj> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatisticsObj");
+
+  private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("colName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField COL_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("colType", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField STATS_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("statsData", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ColumnStatisticsObjStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ColumnStatisticsObjTupleSchemeFactory());
+  }
+
+  private String colName; // required
+  private String colType; // required
+  private ColumnStatisticsData statsData; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    COL_NAME((short)1, "colName"),
+    COL_TYPE((short)2, "colType"),
+    STATS_DATA((short)3, "statsData");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // COL_NAME
+          return COL_NAME;
+        case 2: // COL_TYPE
+          return COL_TYPE;
+        case 3: // STATS_DATA
+          return STATS_DATA;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.COL_NAME, new org.apache.thrift.meta_data.FieldMetaData("colName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COL_TYPE, new org.apache.thrift.meta_data.FieldMetaData("colType", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.STATS_DATA, new org.apache.thrift.meta_data.FieldMetaData("statsData", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsData.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatisticsObj.class, metaDataMap);
+  }
+
+  public ColumnStatisticsObj() {
+  }
+
+  public ColumnStatisticsObj(
+    String colName,
+    String colType,
+    ColumnStatisticsData statsData)
+  {
+    this();
+    this.colName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(colName);
+    this.colType = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(colType);
+    this.statsData = statsData;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ColumnStatisticsObj(ColumnStatisticsObj other) {
+    if (other.isSetColName()) {
+      this.colName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.colName);
+    }
+    if (other.isSetColType()) {
+      this.colType = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.colType);
+    }
+    if (other.isSetStatsData()) {
+      this.statsData = new ColumnStatisticsData(other.statsData);
+    }
+  }
+
+  public ColumnStatisticsObj deepCopy() {
+    return new ColumnStatisticsObj(this);
+  }
+
+  @Override
+  public void clear() {
+    this.colName = null;
+    this.colType = null;
+    this.statsData = null;
+  }
+
+  public String getColName() {
+    return this.colName;
+  }
+
+  public void setColName(String colName) {
+    this.colName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(colName);
+  }
+
+  public void unsetColName() {
+    this.colName = null;
+  }
+
+  /** Returns true if field colName is set (has been assigned a value) and false otherwise */
+  public boolean isSetColName() {
+    return this.colName != null;
+  }
+
+  public void setColNameIsSet(boolean value) {
+    if (!value) {
+      this.colName = null;
+    }
+  }
+
+  public String getColType() {
+    return this.colType;
+  }
+
+  public void setColType(String colType) {
+    this.colType = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(colType);
+  }
+
+  public void unsetColType() {
+    this.colType = null;
+  }
+
+  /** Returns true if field colType is set (has been assigned a value) and false otherwise */
+  public boolean isSetColType() {
+    return this.colType != null;
+  }
+
+  public void setColTypeIsSet(boolean value) {
+    if (!value) {
+      this.colType = null;
+    }
+  }
+
+  public ColumnStatisticsData getStatsData() {
+    return this.statsData;
+  }
+
+  public void setStatsData(ColumnStatisticsData statsData) {
+    this.statsData = statsData;
+  }
+
+  public void unsetStatsData() {
+    this.statsData = null;
+  }
+
+  /** Returns true if field statsData is set (has been assigned a value) and false otherwise */
+  public boolean isSetStatsData() {
+    return this.statsData != null;
+  }
+
+  public void setStatsDataIsSet(boolean value) {
+    if (!value) {
+      this.statsData = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case COL_NAME:
+      if (value == null) {
+        unsetColName();
+      } else {
+        setColName((String)value);
+      }
+      break;
+
+    case COL_TYPE:
+      if (value == null) {
+        unsetColType();
+      } else {
+        setColType((String)value);
+      }
+      break;
+
+    case STATS_DATA:
+      if (value == null) {
+        unsetStatsData();
+      } else {
+        setStatsData((ColumnStatisticsData)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case COL_NAME:
+      return getColName();
+
+    case COL_TYPE:
+      return getColType();
+
+    case STATS_DATA:
+      return getStatsData();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case COL_NAME:
+      return isSetColName();
+    case COL_TYPE:
+      return isSetColType();
+    case STATS_DATA:
+      return isSetStatsData();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ColumnStatisticsObj)
+      return this.equals((ColumnStatisticsObj)that);
+    return false;
+  }
+
+  public boolean equals(ColumnStatisticsObj that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_colName = true && this.isSetColName();
+    boolean that_present_colName = true && that.isSetColName();
+    if (this_present_colName || that_present_colName) {
+      if (!(this_present_colName && that_present_colName))
+        return false;
+      if (!this.colName.equals(that.colName))
+        return false;
+    }
+
+    boolean this_present_colType = true && this.isSetColType();
+    boolean that_present_colType = true && that.isSetColType();
+    if (this_present_colType || that_present_colType) {
+      if (!(this_present_colType && that_present_colType))
+        return false;
+      if (!this.colType.equals(that.colType))
+        return false;
+    }
+
+    boolean this_present_statsData = true && this.isSetStatsData();
+    boolean that_present_statsData = true && that.isSetStatsData();
+    if (this_present_statsData || that_present_statsData) {
+      if (!(this_present_statsData && that_present_statsData))
+        return false;
+      if (!this.statsData.equals(that.statsData))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_colName = true && (isSetColName());
+    list.add(present_colName);
+    if (present_colName)
+      list.add(colName);
+
+    boolean present_colType = true && (isSetColType());
+    list.add(present_colType);
+    if (present_colType)
+      list.add(colType);
+
+    boolean present_statsData = true && (isSetStatsData());
+    list.add(present_statsData);
+    if (present_statsData)
+      list.add(statsData);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ColumnStatisticsObj other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetColName()).compareTo(other.isSetColName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colName, other.colName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColType()).compareTo(other.isSetColType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colType, other.colType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetStatsData()).compareTo(other.isSetStatsData());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetStatsData()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.statsData, other.statsData);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ColumnStatisticsObj(");
+    boolean first = true;
+
+    sb.append("colName:");
+    if (this.colName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.colName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("colType:");
+    if (this.colType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.colType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("statsData:");
+    if (this.statsData == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.statsData);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetColName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'colName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetColType()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'colType' is unset! Struct:" + toString());
+    }
+
+    if (!isSetStatsData()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'statsData' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ColumnStatisticsObjStandardSchemeFactory implements SchemeFactory {
+    public ColumnStatisticsObjStandardScheme getScheme() {
+      return new ColumnStatisticsObjStandardScheme();
+    }
+  }
+
+  private static class ColumnStatisticsObjStandardScheme extends StandardScheme<ColumnStatisticsObj> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatisticsObj struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // COL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.colName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setColNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // COL_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.colType = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setColTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // STATS_DATA
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.statsData = new ColumnStatisticsData();
+              struct.statsData.read(iprot);
+              struct.setStatsDataIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatisticsObj struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.colName != null) {
+        oprot.writeFieldBegin(COL_NAME_FIELD_DESC);
+        oprot.writeString(struct.colName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.colType != null) {
+        oprot.writeFieldBegin(COL_TYPE_FIELD_DESC);
+        oprot.writeString(struct.colType);
+        oprot.writeFieldEnd();
+      }
+      if (struct.statsData != null) {
+        oprot.writeFieldBegin(STATS_DATA_FIELD_DESC);
+        struct.statsData.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ColumnStatisticsObjTupleSchemeFactory implements SchemeFactory {
+    public ColumnStatisticsObjTupleScheme getScheme() {
+      return new ColumnStatisticsObjTupleScheme();
+    }
+  }
+
+  private static class ColumnStatisticsObjTupleScheme extends TupleScheme<ColumnStatisticsObj> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsObj struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.colName);
+      oprot.writeString(struct.colType);
+      struct.statsData.write(oprot);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatisticsObj struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.colName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+      struct.setColNameIsSet(true);
+      struct.colType = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+      struct.setColTypeIsSet(true);
+      struct.statsData = new ColumnStatisticsData();
+      struct.statsData.read(iprot);
+      struct.setStatsDataIsSet(true);
+    }
+  }
+
+}
+
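
A minimal usage sketch for the generated struct above: it populates the three
required fields and round-trips the object through TCompactProtocol, the same
protocol its writeObject()/readObject() use. It assumes the ColumnStatisticsData
union and LongColumnStatsData struct generated from the same Thrift IDL; the
column name, type, and counts are illustrative.

import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TMemoryBuffer;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;

public class ColumnStatsRoundTrip {
  public static void main(String[] args) throws Exception {
    // All three fields are required; validate() rejects any unset one.
    ColumnStatisticsObj obj = new ColumnStatisticsObj();
    obj.setColName("id");
    obj.setColType("bigint");
    // ColumnStatisticsData is a Thrift union; longStats(...) is its generated
    // per-field factory (assumed here, not shown in this diff).
    obj.setStatsData(ColumnStatisticsData.longStats(new LongColumnStatsData(0L, 10L)));

    // Serialize, then deserialize into a fresh instance.
    TMemoryBuffer buf = new TMemoryBuffer(1024);
    obj.write(new TCompactProtocol(buf));
    ColumnStatisticsObj copy = new ColumnStatisticsObj();
    copy.read(new TCompactProtocol(buf));
    System.out.println(obj.equals(copy)); // true
  }
}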

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java
new file mode 100644
index 0000000..f295958
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CommitTxnRequest.java
@@ -0,0 +1,657 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CommitTxnRequest implements org.apache.thrift.TBase<CommitTxnRequest, CommitTxnRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CommitTxnRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CommitTxnRequest");
+
+  private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField REPL_POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("replPolicy", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField WRITE_EVENT_INFOS_FIELD_DESC = new org.apache.thrift.protocol.TField("writeEventInfos", org.apache.thrift.protocol.TType.LIST, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CommitTxnRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CommitTxnRequestTupleSchemeFactory());
+  }
+
+  private long txnid; // required
+  private String replPolicy; // optional
+  private List<WriteEventInfo> writeEventInfos; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TXNID((short)1, "txnid"),
+    REPL_POLICY((short)2, "replPolicy"),
+    WRITE_EVENT_INFOS((short)3, "writeEventInfos");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TXNID
+          return TXNID;
+        case 2: // REPL_POLICY
+          return REPL_POLICY;
+        case 3: // WRITE_EVENT_INFOS
+          return WRITE_EVENT_INFOS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TXNID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.REPL_POLICY,_Fields.WRITE_EVENT_INFOS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TXNID, new org.apache.thrift.meta_data.FieldMetaData("txnid", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.REPL_POLICY, new org.apache.thrift.meta_data.FieldMetaData("replPolicy", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.WRITE_EVENT_INFOS, new org.apache.thrift.meta_data.FieldMetaData("writeEventInfos", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT            , "WriteEventInfo"))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CommitTxnRequest.class, metaDataMap);
+  }
+
+  public CommitTxnRequest() {
+  }
+
+  public CommitTxnRequest(
+    long txnid)
+  {
+    this();
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CommitTxnRequest(CommitTxnRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.txnid = other.txnid;
+    if (other.isSetReplPolicy()) {
+      this.replPolicy = other.replPolicy;
+    }
+    if (other.isSetWriteEventInfos()) {
+      List<WriteEventInfo> __this__writeEventInfos = new ArrayList<WriteEventInfo>(other.writeEventInfos.size());
+      for (WriteEventInfo other_element : other.writeEventInfos) {
+        __this__writeEventInfos.add(other_element);
+      }
+      this.writeEventInfos = __this__writeEventInfos;
+    }
+  }
+
+  public CommitTxnRequest deepCopy() {
+    return new CommitTxnRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setTxnidIsSet(false);
+    this.txnid = 0;
+    this.replPolicy = null;
+    this.writeEventInfos = null;
+  }
+
+  public long getTxnid() {
+    return this.txnid;
+  }
+
+  public void setTxnid(long txnid) {
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+  }
+
+  public void unsetTxnid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  /** Returns true if field txnid is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnid() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  public void setTxnidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+  }
+
+  public String getReplPolicy() {
+    return this.replPolicy;
+  }
+
+  public void setReplPolicy(String replPolicy) {
+    this.replPolicy = replPolicy;
+  }
+
+  public void unsetReplPolicy() {
+    this.replPolicy = null;
+  }
+
+  /** Returns true if field replPolicy is set (has been assigned a value) and false otherwise */
+  public boolean isSetReplPolicy() {
+    return this.replPolicy != null;
+  }
+
+  public void setReplPolicyIsSet(boolean value) {
+    if (!value) {
+      this.replPolicy = null;
+    }
+  }
+
+  public int getWriteEventInfosSize() {
+    return (this.writeEventInfos == null) ? 0 : this.writeEventInfos.size();
+  }
+
+  public java.util.Iterator<WriteEventInfo> getWriteEventInfosIterator() {
+    return (this.writeEventInfos == null) ? null : this.writeEventInfos.iterator();
+  }
+
+  public void addToWriteEventInfos(WriteEventInfo elem) {
+    if (this.writeEventInfos == null) {
+      this.writeEventInfos = new ArrayList<WriteEventInfo>();
+    }
+    this.writeEventInfos.add(elem);
+  }
+
+  public List<WriteEventInfo> getWriteEventInfos() {
+    return this.writeEventInfos;
+  }
+
+  public void setWriteEventInfos(List<WriteEventInfo> writeEventInfos) {
+    this.writeEventInfos = writeEventInfos;
+  }
+
+  public void unsetWriteEventInfos() {
+    this.writeEventInfos = null;
+  }
+
+  /** Returns true if field writeEventInfos is set (has been assigned a value) and false otherwise */
+  public boolean isSetWriteEventInfos() {
+    return this.writeEventInfos != null;
+  }
+
+  public void setWriteEventInfosIsSet(boolean value) {
+    if (!value) {
+      this.writeEventInfos = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TXNID:
+      if (value == null) {
+        unsetTxnid();
+      } else {
+        setTxnid((Long)value);
+      }
+      break;
+
+    case REPL_POLICY:
+      if (value == null) {
+        unsetReplPolicy();
+      } else {
+        setReplPolicy((String)value);
+      }
+      break;
+
+    case WRITE_EVENT_INFOS:
+      if (value == null) {
+        unsetWriteEventInfos();
+      } else {
+        setWriteEventInfos((List<WriteEventInfo>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TXNID:
+      return getTxnid();
+
+    case REPL_POLICY:
+      return getReplPolicy();
+
+    case WRITE_EVENT_INFOS:
+      return getWriteEventInfos();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TXNID:
+      return isSetTxnid();
+    case REPL_POLICY:
+      return isSetReplPolicy();
+    case WRITE_EVENT_INFOS:
+      return isSetWriteEventInfos();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CommitTxnRequest)
+      return this.equals((CommitTxnRequest)that);
+    return false;
+  }
+
+  public boolean equals(CommitTxnRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_txnid = true;
+    boolean that_present_txnid = true;
+    if (this_present_txnid || that_present_txnid) {
+      if (!(this_present_txnid && that_present_txnid))
+        return false;
+      if (this.txnid != that.txnid)
+        return false;
+    }
+
+    boolean this_present_replPolicy = true && this.isSetReplPolicy();
+    boolean that_present_replPolicy = true && that.isSetReplPolicy();
+    if (this_present_replPolicy || that_present_replPolicy) {
+      if (!(this_present_replPolicy && that_present_replPolicy))
+        return false;
+      if (!this.replPolicy.equals(that.replPolicy))
+        return false;
+    }
+
+    boolean this_present_writeEventInfos = true && this.isSetWriteEventInfos();
+    boolean that_present_writeEventInfos = true && that.isSetWriteEventInfos();
+    if (this_present_writeEventInfos || that_present_writeEventInfos) {
+      if (!(this_present_writeEventInfos && that_present_writeEventInfos))
+        return false;
+      if (!this.writeEventInfos.equals(that.writeEventInfos))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_txnid = true;
+    list.add(present_txnid);
+    if (present_txnid)
+      list.add(txnid);
+
+    boolean present_replPolicy = true && (isSetReplPolicy());
+    list.add(present_replPolicy);
+    if (present_replPolicy)
+      list.add(replPolicy);
+
+    boolean present_writeEventInfos = true && (isSetWriteEventInfos());
+    list.add(present_writeEventInfos);
+    if (present_writeEventInfos)
+      list.add(writeEventInfos);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CommitTxnRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTxnid()).compareTo(other.isSetTxnid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnid, other.txnid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetReplPolicy()).compareTo(other.isSetReplPolicy());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetReplPolicy()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replPolicy, other.replPolicy);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetWriteEventInfos()).compareTo(other.isSetWriteEventInfos());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetWriteEventInfos()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeEventInfos, other.writeEventInfos);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CommitTxnRequest(");
+    boolean first = true;
+
+    sb.append("txnid:");
+    sb.append(this.txnid);
+    first = false;
+    if (isSetReplPolicy()) {
+      if (!first) sb.append(", ");
+      sb.append("replPolicy:");
+      if (this.replPolicy == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.replPolicy);
+      }
+      first = false;
+    }
+    if (isSetWriteEventInfos()) {
+      if (!first) sb.append(", ");
+      sb.append("writeEventInfos:");
+      if (this.writeEventInfos == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.writeEventInfos);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTxnid()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnid' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // It doesn't seem like you should have to do this, but Java serialization is wacky and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CommitTxnRequestStandardSchemeFactory implements SchemeFactory {
+    public CommitTxnRequestStandardScheme getScheme() {
+      return new CommitTxnRequestStandardScheme();
+    }
+  }
+
+  private static class CommitTxnRequestStandardScheme extends StandardScheme<CommitTxnRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CommitTxnRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TXNID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txnid = iprot.readI64();
+              struct.setTxnidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // REPL_POLICY
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.replPolicy = iprot.readString();
+              struct.setReplPolicyIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // WRITE_EVENT_INFOS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list594 = iprot.readListBegin();
+                struct.writeEventInfos = new ArrayList<WriteEventInfo>(_list594.size);
+                WriteEventInfo _elem595;
+                for (int _i596 = 0; _i596 < _list594.size; ++_i596)
+                {
+                  _elem595 = new WriteEventInfo();
+                  _elem595.read(iprot);
+                  struct.writeEventInfos.add(_elem595);
+                }
+                iprot.readListEnd();
+              }
+              struct.setWriteEventInfosIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CommitTxnRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(TXNID_FIELD_DESC);
+      oprot.writeI64(struct.txnid);
+      oprot.writeFieldEnd();
+      if (struct.replPolicy != null) {
+        if (struct.isSetReplPolicy()) {
+          oprot.writeFieldBegin(REPL_POLICY_FIELD_DESC);
+          oprot.writeString(struct.replPolicy);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.writeEventInfos != null) {
+        if (struct.isSetWriteEventInfos()) {
+          oprot.writeFieldBegin(WRITE_EVENT_INFOS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.writeEventInfos.size()));
+            for (WriteEventInfo _iter597 : struct.writeEventInfos)
+            {
+              _iter597.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CommitTxnRequestTupleSchemeFactory implements SchemeFactory {
+    public CommitTxnRequestTupleScheme getScheme() {
+      return new CommitTxnRequestTupleScheme();
+    }
+  }
+
+  private static class CommitTxnRequestTupleScheme extends TupleScheme<CommitTxnRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.txnid);
+      BitSet optionals = new BitSet();
+      if (struct.isSetReplPolicy()) {
+        optionals.set(0);
+      }
+      if (struct.isSetWriteEventInfos()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetReplPolicy()) {
+        oprot.writeString(struct.replPolicy);
+      }
+      if (struct.isSetWriteEventInfos()) {
+        {
+          oprot.writeI32(struct.writeEventInfos.size());
+          for (WriteEventInfo _iter598 : struct.writeEventInfos)
+          {
+            _iter598.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CommitTxnRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.txnid = iprot.readI64();
+      struct.setTxnidIsSet(true);
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.replPolicy = iprot.readString();
+        struct.setReplPolicyIsSet(true);
+      }
+      if (incoming.get(1)) {
+        {
+          org.apache.thrift.protocol.TList _list599 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.writeEventInfos = new ArrayList<WriteEventInfo>(_list599.size);
+          WriteEventInfo _elem600;
+          for (int _i601 = 0; _i601 < _list599.size; ++_i601)
+          {
+            _elem600 = new WriteEventInfo();
+            _elem600.read(iprot);
+            struct.writeEventInfos.add(_elem600);
+          }
+        }
+        struct.setWriteEventInfosIsSet(true);
+      }
+    }
+  }
+
+}
+
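
A short sketch of how the required/optional split above behaves for
CommitTxnRequest, using only the API in this diff (the transaction id and
replication policy are illustrative values):

import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;

public class CommitTxnRequestDemo {
  public static void main(String[] args) throws Exception {
    // txnid is the only required field; the one-arg constructor assigns it
    // and flips its isset bit, so validate() passes.
    CommitTxnRequest req = new CommitTxnRequest(42L);
    req.validate();

    // replPolicy and writeEventInfos are optional: while unset, write()
    // skips them on the wire and toString() omits them.
    System.out.println(req);                   // CommitTxnRequest(txnid:42)
    req.setReplPolicy("default.*");            // illustrative policy string
    System.out.println(req.isSetReplPolicy()); // true
    System.out.println(req);                   // now also prints replPolicy
  }
}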


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
new file mode 100644
index 0000000..b8f6d24
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetRoleGrantsForPrincipalResponse implements org.apache.thrift.TBase<GetRoleGrantsForPrincipalResponse, GetRoleGrantsForPrincipalResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetRoleGrantsForPrincipalResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetRoleGrantsForPrincipalResponse");
+
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_GRANTS_FIELD_DESC = new org.apache.thrift.protocol.TField("principalGrants", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetRoleGrantsForPrincipalResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetRoleGrantsForPrincipalResponseTupleSchemeFactory());
+  }
+
+  private List<RolePrincipalGrant> principalGrants; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PRINCIPAL_GRANTS((short)1, "principalGrants");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PRINCIPAL_GRANTS
+          return PRINCIPAL_GRANTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PRINCIPAL_GRANTS, new org.apache.thrift.meta_data.FieldMetaData("principalGrants", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, RolePrincipalGrant.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetRoleGrantsForPrincipalResponse.class, metaDataMap);
+  }
+
+  public GetRoleGrantsForPrincipalResponse() {
+  }
+
+  public GetRoleGrantsForPrincipalResponse(
+    List<RolePrincipalGrant> principalGrants)
+  {
+    this();
+    this.principalGrants = principalGrants;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetRoleGrantsForPrincipalResponse(GetRoleGrantsForPrincipalResponse other) {
+    if (other.isSetPrincipalGrants()) {
+      List<RolePrincipalGrant> __this__principalGrants = new ArrayList<RolePrincipalGrant>(other.principalGrants.size());
+      for (RolePrincipalGrant other_element : other.principalGrants) {
+        __this__principalGrants.add(new RolePrincipalGrant(other_element));
+      }
+      this.principalGrants = __this__principalGrants;
+    }
+  }
+
+  public GetRoleGrantsForPrincipalResponse deepCopy() {
+    return new GetRoleGrantsForPrincipalResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.principalGrants = null;
+  }
+
+  public int getPrincipalGrantsSize() {
+    return (this.principalGrants == null) ? 0 : this.principalGrants.size();
+  }
+
+  public java.util.Iterator<RolePrincipalGrant> getPrincipalGrantsIterator() {
+    return (this.principalGrants == null) ? null : this.principalGrants.iterator();
+  }
+
+  public void addToPrincipalGrants(RolePrincipalGrant elem) {
+    if (this.principalGrants == null) {
+      this.principalGrants = new ArrayList<RolePrincipalGrant>();
+    }
+    this.principalGrants.add(elem);
+  }
+
+  public List<RolePrincipalGrant> getPrincipalGrants() {
+    return this.principalGrants;
+  }
+
+  public void setPrincipalGrants(List<RolePrincipalGrant> principalGrants) {
+    this.principalGrants = principalGrants;
+  }
+
+  public void unsetPrincipalGrants() {
+    this.principalGrants = null;
+  }
+
+  /** Returns true if field principalGrants is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipalGrants() {
+    return this.principalGrants != null;
+  }
+
+  public void setPrincipalGrantsIsSet(boolean value) {
+    if (!value) {
+      this.principalGrants = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PRINCIPAL_GRANTS:
+      if (value == null) {
+        unsetPrincipalGrants();
+      } else {
+        setPrincipalGrants((List<RolePrincipalGrant>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PRINCIPAL_GRANTS:
+      return getPrincipalGrants();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PRINCIPAL_GRANTS:
+      return isSetPrincipalGrants();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetRoleGrantsForPrincipalResponse)
+      return this.equals((GetRoleGrantsForPrincipalResponse)that);
+    return false;
+  }
+
+  public boolean equals(GetRoleGrantsForPrincipalResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_principalGrants = true && this.isSetPrincipalGrants();
+    boolean that_present_principalGrants = true && that.isSetPrincipalGrants();
+    if (this_present_principalGrants || that_present_principalGrants) {
+      if (!(this_present_principalGrants && that_present_principalGrants))
+        return false;
+      if (!this.principalGrants.equals(that.principalGrants))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_principalGrants = true && (isSetPrincipalGrants());
+    list.add(present_principalGrants);
+    if (present_principalGrants)
+      list.add(principalGrants);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetRoleGrantsForPrincipalResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPrincipalGrants()).compareTo(other.isSetPrincipalGrants());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipalGrants()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principalGrants, other.principalGrants);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetRoleGrantsForPrincipalResponse(");
+    boolean first = true;
+
+    sb.append("principalGrants:");
+    if (this.principalGrants == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principalGrants);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetPrincipalGrants()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'principalGrants' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetRoleGrantsForPrincipalResponseStandardSchemeFactory implements SchemeFactory {
+    public GetRoleGrantsForPrincipalResponseStandardScheme getScheme() {
+      return new GetRoleGrantsForPrincipalResponseStandardScheme();
+    }
+  }
+
+  private static class GetRoleGrantsForPrincipalResponseStandardScheme extends StandardScheme<GetRoleGrantsForPrincipalResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetRoleGrantsForPrincipalResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PRINCIPAL_GRANTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list78 = iprot.readListBegin();
+                struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list78.size);
+                RolePrincipalGrant _elem79;
+                for (int _i80 = 0; _i80 < _list78.size; ++_i80)
+                {
+                  _elem79 = new RolePrincipalGrant();
+                  _elem79.read(iprot);
+                  struct.principalGrants.add(_elem79);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPrincipalGrantsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetRoleGrantsForPrincipalResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.principalGrants != null) {
+        oprot.writeFieldBegin(PRINCIPAL_GRANTS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.principalGrants.size()));
+          for (RolePrincipalGrant _iter81 : struct.principalGrants)
+          {
+            _iter81.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetRoleGrantsForPrincipalResponseTupleSchemeFactory implements SchemeFactory {
+    public GetRoleGrantsForPrincipalResponseTupleScheme getScheme() {
+      return new GetRoleGrantsForPrincipalResponseTupleScheme();
+    }
+  }
+
+  private static class GetRoleGrantsForPrincipalResponseTupleScheme extends TupleScheme<GetRoleGrantsForPrincipalResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetRoleGrantsForPrincipalResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.principalGrants.size());
+        for (RolePrincipalGrant _iter82 : struct.principalGrants)
+        {
+          _iter82.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetRoleGrantsForPrincipalResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list83 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list83.size);
+        RolePrincipalGrant _elem84;
+        for (int _i85 = 0; _i85 < _list83.size; ++_i85)
+        {
+          _elem84 = new RolePrincipalGrant();
+          _elem84.read(iprot);
+          struct.principalGrants.add(_elem84);
+        }
+      }
+      struct.setPrincipalGrantsIsSet(true);
+    }
+  }
+
+}
+
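
A brief sketch of the required-list handling in the response struct above,
again using only the generated API shown in this diff:

import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;

public class RoleGrantsResponseDemo {
  public static void main(String[] args) {
    GetRoleGrantsForPrincipalResponse resp = new GetRoleGrantsForPrincipalResponse();

    // principalGrants is required, so validate() (and hence any write) fails
    // while the list is still null.
    try {
      resp.validate();
    } catch (org.apache.thrift.TException expected) {
      System.out.println(expected.getMessage());
    }

    // addToPrincipalGrants() lazily creates the backing ArrayList, so callers
    // need not call setPrincipalGrants() first.
    resp.addToPrincipalGrants(new RolePrincipalGrant());
    System.out.println(resp.getPrincipalGrantsSize()); // 1
  }
}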

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRuntimeStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRuntimeStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRuntimeStatsRequest.java
new file mode 100644
index 0000000..593b7bf
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRuntimeStatsRequest.java
@@ -0,0 +1,482 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetRuntimeStatsRequest implements org.apache.thrift.TBase<GetRuntimeStatsRequest, GetRuntimeStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetRuntimeStatsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetRuntimeStatsRequest");
+
+  private static final org.apache.thrift.protocol.TField MAX_WEIGHT_FIELD_DESC = new org.apache.thrift.protocol.TField("maxWeight", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField MAX_CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("maxCreateTime", org.apache.thrift.protocol.TType.I32, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetRuntimeStatsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetRuntimeStatsRequestTupleSchemeFactory());
+  }
+
+  private int maxWeight; // required
+  private int maxCreateTime; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MAX_WEIGHT((short)1, "maxWeight"),
+    MAX_CREATE_TIME((short)2, "maxCreateTime");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MAX_WEIGHT
+          return MAX_WEIGHT;
+        case 2: // MAX_CREATE_TIME
+          return MAX_CREATE_TIME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __MAXWEIGHT_ISSET_ID = 0;
+  private static final int __MAXCREATETIME_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MAX_WEIGHT, new org.apache.thrift.meta_data.FieldMetaData("maxWeight", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.MAX_CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("maxCreateTime", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetRuntimeStatsRequest.class, metaDataMap);
+  }
+
+  public GetRuntimeStatsRequest() {
+  }
+
+  public GetRuntimeStatsRequest(
+    int maxWeight,
+    int maxCreateTime)
+  {
+    this();
+    this.maxWeight = maxWeight;
+    setMaxWeightIsSet(true);
+    this.maxCreateTime = maxCreateTime;
+    setMaxCreateTimeIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetRuntimeStatsRequest(GetRuntimeStatsRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.maxWeight = other.maxWeight;
+    this.maxCreateTime = other.maxCreateTime;
+  }
+
+  public GetRuntimeStatsRequest deepCopy() {
+    return new GetRuntimeStatsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setMaxWeightIsSet(false);
+    this.maxWeight = 0;
+    setMaxCreateTimeIsSet(false);
+    this.maxCreateTime = 0;
+  }
+
+  public int getMaxWeight() {
+    return this.maxWeight;
+  }
+
+  public void setMaxWeight(int maxWeight) {
+    this.maxWeight = maxWeight;
+    setMaxWeightIsSet(true);
+  }
+
+  public void unsetMaxWeight() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXWEIGHT_ISSET_ID);
+  }
+
+  /** Returns true if field maxWeight is set (has been assigned a value) and false otherwise */
+  public boolean isSetMaxWeight() {
+    return EncodingUtils.testBit(__isset_bitfield, __MAXWEIGHT_ISSET_ID);
+  }
+
+  public void setMaxWeightIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXWEIGHT_ISSET_ID, value);
+  }
+
+  public int getMaxCreateTime() {
+    return this.maxCreateTime;
+  }
+
+  public void setMaxCreateTime(int maxCreateTime) {
+    this.maxCreateTime = maxCreateTime;
+    setMaxCreateTimeIsSet(true);
+  }
+
+  public void unsetMaxCreateTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXCREATETIME_ISSET_ID);
+  }
+
+  /** Returns true if field maxCreateTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetMaxCreateTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __MAXCREATETIME_ISSET_ID);
+  }
+
+  public void setMaxCreateTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXCREATETIME_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MAX_WEIGHT:
+      if (value == null) {
+        unsetMaxWeight();
+      } else {
+        setMaxWeight((Integer)value);
+      }
+      break;
+
+    case MAX_CREATE_TIME:
+      if (value == null) {
+        unsetMaxCreateTime();
+      } else {
+        setMaxCreateTime((Integer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MAX_WEIGHT:
+      return getMaxWeight();
+
+    case MAX_CREATE_TIME:
+      return getMaxCreateTime();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MAX_WEIGHT:
+      return isSetMaxWeight();
+    case MAX_CREATE_TIME:
+      return isSetMaxCreateTime();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetRuntimeStatsRequest)
+      return this.equals((GetRuntimeStatsRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetRuntimeStatsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_maxWeight = true;
+    boolean that_present_maxWeight = true;
+    if (this_present_maxWeight || that_present_maxWeight) {
+      if (!(this_present_maxWeight && that_present_maxWeight))
+        return false;
+      if (this.maxWeight != that.maxWeight)
+        return false;
+    }
+
+    boolean this_present_maxCreateTime = true;
+    boolean that_present_maxCreateTime = true;
+    if (this_present_maxCreateTime || that_present_maxCreateTime) {
+      if (!(this_present_maxCreateTime && that_present_maxCreateTime))
+        return false;
+      if (this.maxCreateTime != that.maxCreateTime)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_maxWeight = true;
+    list.add(present_maxWeight);
+    if (present_maxWeight)
+      list.add(maxWeight);
+
+    boolean present_maxCreateTime = true;
+    list.add(present_maxCreateTime);
+    if (present_maxCreateTime)
+      list.add(maxCreateTime);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetRuntimeStatsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMaxWeight()).compareTo(other.isSetMaxWeight());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMaxWeight()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxWeight, other.maxWeight);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMaxCreateTime()).compareTo(other.isSetMaxCreateTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMaxCreateTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxCreateTime, other.maxCreateTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetRuntimeStatsRequest(");
+    boolean first = true;
+
+    sb.append("maxWeight:");
+    sb.append(this.maxWeight);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("maxCreateTime:");
+    sb.append(this.maxCreateTime);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetMaxWeight()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'maxWeight' is unset! Struct:" + toString());
+    }
+
+    if (!isSetMaxCreateTime()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'maxCreateTime' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // It shouldn't be necessary, but Java serialization doesn't call the default constructor, so reset the bitfield here.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetRuntimeStatsRequestStandardSchemeFactory implements SchemeFactory {
+    public GetRuntimeStatsRequestStandardScheme getScheme() {
+      return new GetRuntimeStatsRequestStandardScheme();
+    }
+  }
+
+  private static class GetRuntimeStatsRequestStandardScheme extends StandardScheme<GetRuntimeStatsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetRuntimeStatsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MAX_WEIGHT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.maxWeight = iprot.readI32();
+              struct.setMaxWeightIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // MAX_CREATE_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.maxCreateTime = iprot.readI32();
+              struct.setMaxCreateTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetRuntimeStatsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(MAX_WEIGHT_FIELD_DESC);
+      oprot.writeI32(struct.maxWeight);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(MAX_CREATE_TIME_FIELD_DESC);
+      oprot.writeI32(struct.maxCreateTime);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetRuntimeStatsRequestTupleSchemeFactory implements SchemeFactory {
+    public GetRuntimeStatsRequestTupleScheme getScheme() {
+      return new GetRuntimeStatsRequestTupleScheme();
+    }
+  }
+
+  private static class GetRuntimeStatsRequestTupleScheme extends TupleScheme<GetRuntimeStatsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetRuntimeStatsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI32(struct.maxWeight);
+      oprot.writeI32(struct.maxCreateTime);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetRuntimeStatsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.maxWeight = iprot.readI32();
+      struct.setMaxWeightIsSet(true);
+      struct.maxCreateTime = iprot.readI32();
+      struct.setMaxCreateTimeIsSet(true);
+    }
+  }
+
+}
+
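
A minimal usage sketch for the generated struct above (illustrative only, not part of
the commit; it assumes the Thrift 0.9.3 runtime already imported by the generated file):

    import org.apache.hadoop.hive.metastore.api.GetRuntimeStatsRequest;
    import org.apache.thrift.protocol.TProtocolException;

    public class RuntimeStatsRequestDemo {
      public static void main(String[] args) throws Exception {
        // The two-arg constructor assigns both required i32 fields and flips
        // their isset bits, so validate() passes.
        GetRuntimeStatsRequest req = new GetRuntimeStatsRequest(100, 3600);
        req.validate();
        System.out.println(req); // GetRuntimeStatsRequest(maxWeight:100, maxCreateTime:3600)

        // A default-constructed instance fails validate(), because both
        // fields are marked REQUIRED in metaDataMap.
        try {
          new GetRuntimeStatsRequest().validate();
        } catch (TProtocolException e) {
          System.out.println("expected: " + e.getMessage());
        }
      }
    }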

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetSerdeRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetSerdeRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetSerdeRequest.java
new file mode 100644
index 0000000..bf0a595
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetSerdeRequest.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetSerdeRequest implements org.apache.thrift.TBase<GetSerdeRequest, GetSerdeRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetSerdeRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetSerdeRequest");
+
+  private static final org.apache.thrift.protocol.TField SERDE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("serdeName", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetSerdeRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetSerdeRequestTupleSchemeFactory());
+  }
+
+  private String serdeName; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SERDE_NAME((short)1, "serdeName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SERDE_NAME
+          return SERDE_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SERDE_NAME, new org.apache.thrift.meta_data.FieldMetaData("serdeName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetSerdeRequest.class, metaDataMap);
+  }
+
+  public GetSerdeRequest() {
+  }
+
+  public GetSerdeRequest(
+    String serdeName)
+  {
+    this();
+    this.serdeName = serdeName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetSerdeRequest(GetSerdeRequest other) {
+    if (other.isSetSerdeName()) {
+      this.serdeName = other.serdeName;
+    }
+  }
+
+  public GetSerdeRequest deepCopy() {
+    return new GetSerdeRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.serdeName = null;
+  }
+
+  public String getSerdeName() {
+    return this.serdeName;
+  }
+
+  public void setSerdeName(String serdeName) {
+    this.serdeName = serdeName;
+  }
+
+  public void unsetSerdeName() {
+    this.serdeName = null;
+  }
+
+  /** Returns true if field serdeName is set (has been assigned a value) and false otherwise */
+  public boolean isSetSerdeName() {
+    return this.serdeName != null;
+  }
+
+  public void setSerdeNameIsSet(boolean value) {
+    if (!value) {
+      this.serdeName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SERDE_NAME:
+      if (value == null) {
+        unsetSerdeName();
+      } else {
+        setSerdeName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SERDE_NAME:
+      return getSerdeName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SERDE_NAME:
+      return isSetSerdeName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetSerdeRequest)
+      return this.equals((GetSerdeRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetSerdeRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_serdeName = true && this.isSetSerdeName();
+    boolean that_present_serdeName = true && that.isSetSerdeName();
+    if (this_present_serdeName || that_present_serdeName) {
+      if (!(this_present_serdeName && that_present_serdeName))
+        return false;
+      if (!this.serdeName.equals(that.serdeName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_serdeName = true && (isSetSerdeName());
+    list.add(present_serdeName);
+    if (present_serdeName)
+      list.add(serdeName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetSerdeRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSerdeName()).compareTo(other.isSetSerdeName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSerdeName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serdeName, other.serdeName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetSerdeRequest(");
+    boolean first = true;
+
+    sb.append("serdeName:");
+    if (this.serdeName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.serdeName);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetSerdeRequestStandardSchemeFactory implements SchemeFactory {
+    public GetSerdeRequestStandardScheme getScheme() {
+      return new GetSerdeRequestStandardScheme();
+    }
+  }
+
+  private static class GetSerdeRequestStandardScheme extends StandardScheme<GetSerdeRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetSerdeRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SERDE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.serdeName = iprot.readString();
+              struct.setSerdeNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetSerdeRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.serdeName != null) {
+        oprot.writeFieldBegin(SERDE_NAME_FIELD_DESC);
+        oprot.writeString(struct.serdeName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetSerdeRequestTupleSchemeFactory implements SchemeFactory {
+    public GetSerdeRequestTupleScheme getScheme() {
+      return new GetSerdeRequestTupleScheme();
+    }
+  }
+
+  private static class GetSerdeRequestTupleScheme extends TupleScheme<GetSerdeRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetSerdeRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSerdeName()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetSerdeName()) {
+        oprot.writeString(struct.serdeName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetSerdeRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.serdeName = iprot.readString();
+        struct.setSerdeNameIsSet(true);
+      }
+    }
+  }
+
+}
+
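
A round-trip sketch for GetSerdeRequest (illustrative; the serde class name is just an
example value, and TMemoryBuffer/TCompactProtocol come from the same libthrift 0.9.3
runtime that writeObject()/readObject() above already use):

    import org.apache.hadoop.hive.metastore.api.GetSerdeRequest;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class SerdeRequestRoundTrip {
      public static void main(String[] args) throws Exception {
        // serdeName is DEFAULT, not REQUIRED, so even an empty request validates.
        new GetSerdeRequest().validate();

        GetSerdeRequest out = new GetSerdeRequest("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
        TMemoryBuffer buf = new TMemoryBuffer(128);
        out.write(new TCompactProtocol(buf));   // standard scheme, as in writeObject()

        GetSerdeRequest in = new GetSerdeRequest();
        in.read(new TCompactProtocol(buf));
        System.out.println(out.getSerdeName().equals(in.getSerdeName())); // true
      }
    }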

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
new file mode 100644
index 0000000..3c88d8f
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
@@ -0,0 +1,711 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetTableRequest implements org.apache.thrift.TBase<GetTableRequest, GetTableRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetTableRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetTableRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetTableRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tblName; // required
+  private ClientCapabilities capabilities; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TBL_NAME((short)2, "tblName"),
+    CAPABILITIES((short)3, "capabilities"),
+    CAT_NAME((short)4, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // CAPABILITIES
+          return CAPABILITIES;
+        case 4: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields[] optionals = {_Fields.CAPABILITIES, _Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CAPABILITIES, new org.apache.thrift.meta_data.FieldMetaData("capabilities", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap);
+  }
+
+  public GetTableRequest() {
+  }
+
+  public GetTableRequest(
+    String dbName,
+    String tblName)
+  {
+    this();
+    this.dbName = dbName;
+    this.tblName = tblName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetTableRequest(GetTableRequest other) {
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTblName()) {
+      this.tblName = other.tblName;
+    }
+    if (other.isSetCapabilities()) {
+      this.capabilities = new ClientCapabilities(other.capabilities);
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public GetTableRequest deepCopy() {
+    return new GetTableRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tblName = null;
+    this.capabilities = null;
+    this.catName = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTblName() {
+    return this.tblName;
+  }
+
+  public void setTblName(String tblName) {
+    this.tblName = tblName;
+  }
+
+  public void unsetTblName() {
+    this.tblName = null;
+  }
+
+  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblName() {
+    return this.tblName != null;
+  }
+
+  public void setTblNameIsSet(boolean value) {
+    if (!value) {
+      this.tblName = null;
+    }
+  }
+
+  public ClientCapabilities getCapabilities() {
+    return this.capabilities;
+  }
+
+  public void setCapabilities(ClientCapabilities capabilities) {
+    this.capabilities = capabilities;
+  }
+
+  public void unsetCapabilities() {
+    this.capabilities = null;
+  }
+
+  /** Returns true if field capabilities is set (has been assigned a value) and false otherwise */
+  public boolean isSetCapabilities() {
+    return this.capabilities != null;
+  }
+
+  public void setCapabilitiesIsSet(boolean value) {
+    if (!value) {
+      this.capabilities = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTblName();
+      } else {
+        setTblName((String)value);
+      }
+      break;
+
+    case CAPABILITIES:
+      if (value == null) {
+        unsetCapabilities();
+      } else {
+        setCapabilities((ClientCapabilities)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TBL_NAME:
+      return getTblName();
+
+    case CAPABILITIES:
+      return getCapabilities();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TBL_NAME:
+      return isSetTblName();
+    case CAPABILITIES:
+      return isSetCapabilities();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetTableRequest)
+      return this.equals((GetTableRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetTableRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tblName = true && this.isSetTblName();
+    boolean that_present_tblName = true && that.isSetTblName();
+    if (this_present_tblName || that_present_tblName) {
+      if (!(this_present_tblName && that_present_tblName))
+        return false;
+      if (!this.tblName.equals(that.tblName))
+        return false;
+    }
+
+    boolean this_present_capabilities = true && this.isSetCapabilities();
+    boolean that_present_capabilities = true && that.isSetCapabilities();
+    if (this_present_capabilities || that_present_capabilities) {
+      if (!(this_present_capabilities && that_present_capabilities))
+        return false;
+      if (!this.capabilities.equals(that.capabilities))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tblName = true && (isSetTblName());
+    list.add(present_tblName);
+    if (present_tblName)
+      list.add(tblName);
+
+    boolean present_capabilities = true && (isSetCapabilities());
+    list.add(present_capabilities);
+    if (present_capabilities)
+      list.add(capabilities);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetTableRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCapabilities()).compareTo(other.isSetCapabilities());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCapabilities()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.capabilities, other.capabilities);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetTableRequest(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tblName:");
+    if (this.tblName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tblName);
+    }
+    first = false;
+    if (isSetCapabilities()) {
+      if (!first) sb.append(", ");
+      sb.append("capabilities:");
+      if (this.capabilities == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.capabilities);
+      }
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTblName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (capabilities != null) {
+      capabilities.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetTableRequestStandardSchemeFactory implements SchemeFactory {
+    public GetTableRequestStandardScheme getScheme() {
+      return new GetTableRequestStandardScheme();
+    }
+  }
+
+  private static class GetTableRequestStandardScheme extends StandardScheme<GetTableRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tblName = iprot.readString();
+              struct.setTblNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // CAPABILITIES
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.capabilities = new ClientCapabilities();
+              struct.capabilities.read(iprot);
+              struct.setCapabilitiesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblName != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tblName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.capabilities != null) {
+        if (struct.isSetCapabilities()) {
+          oprot.writeFieldBegin(CAPABILITIES_FIELD_DESC);
+          struct.capabilities.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetTableRequestTupleSchemeFactory implements SchemeFactory {
+    public GetTableRequestTupleScheme getScheme() {
+      return new GetTableRequestTupleScheme();
+    }
+  }
+
+  private static class GetTableRequestTupleScheme extends TupleScheme<GetTableRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tblName);
+      BitSet optionals = new BitSet();
+      if (struct.isSetCapabilities()) {
+        optionals.set(0);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetCapabilities()) {
+        struct.capabilities.write(oprot);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tblName = iprot.readString();
+      struct.setTblNameIsSet(true);
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.capabilities = new ClientCapabilities();
+        struct.capabilities.read(iprot);
+        struct.setCapabilitiesIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
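
A sketch of how the required and optional fields of GetTableRequest interact
(illustrative names only; "default", "web_logs" and "hive" are example values):

    import org.apache.hadoop.hive.metastore.api.GetTableRequest;

    public class TableRequestDemo {
      public static void main(String[] args) throws Exception {
        // dbName and tblName are REQUIRED and both set by this constructor.
        GetTableRequest req = new GetTableRequest("default", "web_logs");
        req.validate();
        System.out.println(req); // GetTableRequest(dbName:default, tblName:web_logs)

        // catName is OPTIONAL: toString(), equals() and the tuple scheme only
        // include it once isSetCatName() is true.
        req.setCatName("hive");
        System.out.println(req); // ..., catName:hive)
        System.out.println(req.equals(new GetTableRequest("default", "web_logs"))); // false
      }
    }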

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
new file mode 100644
index 0000000..968e250
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
@@ -0,0 +1,394 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetTableResult implements org.apache.thrift.TBase<GetTableResult, GetTableResult._Fields>, java.io.Serializable, Cloneable, Comparable<GetTableResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableResult");
+
+  private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetTableResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetTableResultTupleSchemeFactory());
+  }
+
+  private Table table; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TABLE((short)1, "table");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TABLE
+          return TABLE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableResult.class, metaDataMap);
+  }
+
+  public GetTableResult() {
+  }
+
+  public GetTableResult(
+    Table table)
+  {
+    this();
+    this.table = table;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetTableResult(GetTableResult other) {
+    if (other.isSetTable()) {
+      this.table = new Table(other.table);
+    }
+  }
+
+  public GetTableResult deepCopy() {
+    return new GetTableResult(this);
+  }
+
+  @Override
+  public void clear() {
+    this.table = null;
+  }
+
+  public Table getTable() {
+    return this.table;
+  }
+
+  public void setTable(Table table) {
+    this.table = table;
+  }
+
+  public void unsetTable() {
+    this.table = null;
+  }
+
+  /** Returns true if field table is set (has been assigned a value) and false otherwise */
+  public boolean isSetTable() {
+    return this.table != null;
+  }
+
+  public void setTableIsSet(boolean value) {
+    if (!value) {
+      this.table = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TABLE:
+      if (value == null) {
+        unsetTable();
+      } else {
+        setTable((Table)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TABLE:
+      return getTable();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TABLE:
+      return isSetTable();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetTableResult)
+      return this.equals((GetTableResult)that);
+    return false;
+  }
+
+  public boolean equals(GetTableResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_table = true && this.isSetTable();
+    boolean that_present_table = true && that.isSetTable();
+    if (this_present_table || that_present_table) {
+      if (!(this_present_table && that_present_table))
+        return false;
+      if (!this.table.equals(that.table))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_table = true && (isSetTable());
+    list.add(present_table);
+    if (present_table)
+      list.add(table);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetTableResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTable()).compareTo(other.isSetTable());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTable()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table, other.table);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetTableResult(");
+    boolean first = true;
+
+    sb.append("table:");
+    if (this.table == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.table);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTable()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (table != null) {
+      table.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetTableResultStandardSchemeFactory implements SchemeFactory {
+    public GetTableResultStandardScheme getScheme() {
+      return new GetTableResultStandardScheme();
+    }
+  }
+
+  private static class GetTableResultStandardScheme extends StandardScheme<GetTableResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TABLE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.table = new Table();
+              struct.table.read(iprot);
+              struct.setTableIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.table != null) {
+        oprot.writeFieldBegin(TABLE_FIELD_DESC);
+        struct.table.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetTableResultTupleSchemeFactory implements SchemeFactory {
+    public GetTableResultTupleScheme getScheme() {
+      return new GetTableResultTupleScheme();
+    }
+  }
+
+  private static class GetTableResultTupleScheme extends TupleScheme<GetTableResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetTableResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      struct.table.write(oprot);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetTableResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.table = new Table();
+      struct.table.read(iprot);
+      struct.setTableIsSet(true);
+    }
+  }
+
+}
+


[60/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
index 0000000,faca669..e113b10
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/oracle/hive-schema-4.0.0.oracle.sql
@@@ -1,0 -1,1165 +1,1167 @@@
+ -- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+ -- NOTE: Some versions of SchemaTool do not automatically generate this table.
+ -- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+ CREATE TABLE SEQUENCE_TABLE
+ (
+    SEQUENCE_NAME VARCHAR2(255) NOT NULL,
+    NEXT_VAL NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE SEQUENCE_TABLE ADD CONSTRAINT PART_TABLE_PK PRIMARY KEY (SEQUENCE_NAME);
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
+ -- Table NUCLEUS_TABLES is an internal table required by DataNucleus.
+ -- This table is required if datanucleus.autoStartMechanism=SchemaTable
+ -- NOTE: Some versions of SchemaTool do not automatically generate this table.
+ -- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+ CREATE TABLE NUCLEUS_TABLES
+ (
+    CLASS_NAME VARCHAR2(128) NOT NULL,
+    TABLE_NAME VARCHAR2(128) NOT NULL,
+    TYPE VARCHAR2(4) NOT NULL,
+    OWNER VARCHAR2(2) NOT NULL,
+    VERSION VARCHAR2(20) NOT NULL,
+    INTERFACE_NAME VARCHAR2(255) NULL
+ );
+ 
+ ALTER TABLE NUCLEUS_TABLES ADD CONSTRAINT NUCLEUS_TABLES_PK PRIMARY KEY (CLASS_NAME);
+ 
+ -- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ CREATE TABLE PART_COL_PRIVS
+ (
+     PART_COLUMN_GRANT_ID NUMBER NOT NULL,
+     "COLUMN_NAME" VARCHAR2(767) NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PART_ID NUMBER NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     PART_COL_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+ 
+ -- Table CDS.
+ CREATE TABLE CDS
+ (
+     CD_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+ 
+ -- Table COLUMNS_V2 for join relationship
+ CREATE TABLE COLUMNS_V2
+ (
+     CD_ID NUMBER NOT NULL,
+     "COMMENT" VARCHAR2(256) NULL,
+     "COLUMN_NAME" VARCHAR2(767) NOT NULL,
+     TYPE_NAME CLOB NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+ 
+ -- Table PARTITION_KEY_VALS for join relationship
+ CREATE TABLE PARTITION_KEY_VALS
+ (
+     PART_ID NUMBER NOT NULL,
+     PART_KEY_VAL VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+ 
+ CREATE TABLE CTLGS (
+     CTLG_ID NUMBER PRIMARY KEY,
+     "NAME" VARCHAR2(256),
+     "DESC" VARCHAR2(4000),
+     LOCATION_URI VARCHAR2(4000) NOT NULL,
+     UNIQUE ("NAME")
+ );
+ 
+ -- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE TABLE DBS
+ (
+     DB_ID NUMBER NOT NULL,
+     "DESC" VARCHAR2(4000) NULL,
+     DB_LOCATION_URI VARCHAR2(4000) NOT NULL,
+     "NAME" VARCHAR2(128) NULL,
+     OWNER_NAME VARCHAR2(128) NULL,
+     OWNER_TYPE VARCHAR2(10) NULL,
+     CTLG_NAME VARCHAR2(256)
+ );
+ 
+ ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+ 
+ -- Table PARTITION_PARAMS for join relationship
+ CREATE TABLE PARTITION_PARAMS
+ (
+     PART_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE VARCHAR2(4000) NULL
+ );
+ 
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+ 
+ -- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ CREATE TABLE SERDES
+ (
+     SERDE_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NULL,
+     SLIB VARCHAR2(4000) NULL,
+     "DESCRIPTION" VARCHAR2(4000),
+     "SERIALIZER_CLASS" VARCHAR2(4000),
+     "DESERIALIZER_CLASS" VARCHAR2(4000),
+     "SERDE_TYPE" NUMBER
+ );
+ 
+ ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+ 
+ -- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE TABLE TYPES
+ (
+     TYPES_ID NUMBER NOT NULL,
+     TYPE_NAME VARCHAR2(128) NULL,
+     TYPE1 VARCHAR2(767) NULL,
+     TYPE2 VARCHAR2(767) NULL
+ );
+ 
+ ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+ 
+ -- Table PARTITION_KEYS for join relationship
+ CREATE TABLE PARTITION_KEYS
+ (
+     TBL_ID NUMBER NOT NULL,
+     PKEY_COMMENT VARCHAR2(4000) NULL,
+     PKEY_NAME VARCHAR2(128) NOT NULL,
+     PKEY_TYPE VARCHAR2(767) NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+ 
+ -- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE TABLE ROLES
+ (
+     ROLE_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     OWNER_NAME VARCHAR2(128) NULL,
+     ROLE_NAME VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+ 
+ -- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+ CREATE TABLE PARTITIONS
+ (
+     PART_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+     PART_NAME VARCHAR2(767) NULL,
+     SD_ID NUMBER NULL,
 -    TBL_ID NUMBER NULL
++    TBL_ID NUMBER NULL,
++    WRITE_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+ 
+ -- Table INDEX_PARAMS for join relationship
+ CREATE TABLE INDEX_PARAMS
+ (
+     INDEX_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE VARCHAR2(4000) NULL
+ );
+ 
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+ 
+ -- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ CREATE TABLE TBL_COL_PRIVS
+ (
+     TBL_COLUMN_GRANT_ID NUMBER NOT NULL,
+     "COLUMN_NAME" VARCHAR2(767) NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     TBL_COL_PRIV VARCHAR2(128) NULL,
+     TBL_ID NUMBER NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+ 
+ -- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+ CREATE TABLE IDXS
+ (
+     INDEX_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     DEFERRED_REBUILD NUMBER(1) NOT NULL CHECK (DEFERRED_REBUILD IN (1,0)),
+     INDEX_HANDLER_CLASS VARCHAR2(4000) NULL,
+     INDEX_NAME VARCHAR2(128) NULL,
+     INDEX_TBL_ID NUMBER NULL,
+     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+     ORIG_TBL_ID NUMBER NULL,
+     SD_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+ 
+ -- Table BUCKETING_COLS for join relationship
+ CREATE TABLE BUCKETING_COLS
+ (
+     SD_ID NUMBER NOT NULL,
+     BUCKET_COL_NAME VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table TYPE_FIELDS for join relationship
+ CREATE TABLE TYPE_FIELDS
+ (
+     TYPE_NAME NUMBER NOT NULL,
+     "COMMENT" VARCHAR2(256) NULL,
+     FIELD_NAME VARCHAR2(128) NOT NULL,
+     FIELD_TYPE VARCHAR2(767) NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+ 
+ -- Table SD_PARAMS for join relationship
+ CREATE TABLE SD_PARAMS
+ (
+     SD_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE CLOB NULL
+ );
+ 
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+ 
+ -- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE TABLE GLOBAL_PRIVS
+ (
+     USER_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     USER_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+ 
+ -- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ CREATE TABLE SDS
+ (
+     SD_ID NUMBER NOT NULL,
+     CD_ID NUMBER NULL,
+     INPUT_FORMAT VARCHAR2(4000) NULL,
+     IS_COMPRESSED NUMBER(1) NOT NULL CHECK (IS_COMPRESSED IN (1,0)),
+     LOCATION VARCHAR2(4000) NULL,
+     NUM_BUCKETS NUMBER (10) NOT NULL,
+     OUTPUT_FORMAT VARCHAR2(4000) NULL,
+     SERDE_ID NUMBER NULL,
+     IS_STOREDASSUBDIRECTORIES NUMBER(1) NOT NULL CHECK (IS_STOREDASSUBDIRECTORIES IN (1,0))
+ );
+ 
+ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+ 
+ -- Table TABLE_PARAMS for join relationship
+ CREATE TABLE TABLE_PARAMS
+ (
+     TBL_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE CLOB NULL
+ );
+ 
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+ 
+ -- Table SORT_COLS for join relationship
+ CREATE TABLE SORT_COLS
+ (
+     SD_ID NUMBER NOT NULL,
+     "COLUMN_NAME" VARCHAR2(767) NULL,
+     "ORDER" NUMBER (10) NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ CREATE TABLE TBL_PRIVS
+ (
+     TBL_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     TBL_PRIV VARCHAR2(128) NULL,
+     TBL_ID NUMBER NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+ 
+ -- Table DATABASE_PARAMS for join relationship
+ CREATE TABLE DATABASE_PARAMS
+ (
+     DB_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(180) NOT NULL,
+     PARAM_VALUE VARCHAR2(4000) NULL
+ );
+ 
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+ 
+ -- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ CREATE TABLE ROLE_MAP
+ (
+     ROLE_GRANT_ID NUMBER NOT NULL,
+     ADD_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     ROLE_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+ 
+ -- Table SERDE_PARAMS for join relationship
+ CREATE TABLE SERDE_PARAMS
+ (
+     SERDE_ID NUMBER NOT NULL,
+     PARAM_KEY VARCHAR2(256) NOT NULL,
+     PARAM_VALUE CLOB NULL
+ );
+ 
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+ 
+ -- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ CREATE TABLE PART_PRIVS
+ (
+     PART_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PART_ID NUMBER NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     PART_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+ 
+ -- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ CREATE TABLE DB_PRIVS
+ (
+     DB_GRANT_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     DB_ID NUMBER NULL,
+     GRANT_OPTION NUMBER (5) NOT NULL,
+     GRANTOR VARCHAR2(128) NULL,
+     GRANTOR_TYPE VARCHAR2(128) NULL,
+     PRINCIPAL_NAME VARCHAR2(128) NULL,
+     PRINCIPAL_TYPE VARCHAR2(128) NULL,
+     DB_PRIV VARCHAR2(128) NULL,
+     AUTHORIZER VARCHAR2(128) NULL
+ );
+ 
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+ 
+ -- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+ CREATE TABLE TBLS
+ (
+     TBL_ID NUMBER NOT NULL,
+     CREATE_TIME NUMBER (10) NOT NULL,
+     DB_ID NUMBER NULL,
+     LAST_ACCESS_TIME NUMBER (10) NOT NULL,
+     OWNER VARCHAR2(767) NULL,
+     OWNER_TYPE VARCHAR2(10) NULL,
+     RETENTION NUMBER (10) NOT NULL,
+     SD_ID NUMBER NULL,
+     TBL_NAME VARCHAR2(256) NULL,
+     TBL_TYPE VARCHAR2(128) NULL,
+     VIEW_EXPANDED_TEXT CLOB NULL,
+     VIEW_ORIGINAL_TEXT CLOB NULL,
 -    IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0))
++    IS_REWRITE_ENABLED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_REWRITE_ENABLED IN (1,0)),
++    WRITE_ID NUMBER NULL
+ );
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+ 
+ -- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata]
+ CREATE TABLE MV_CREATION_METADATA
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     CAT_NAME VARCHAR2(256) NOT NULL,
+     DB_NAME VARCHAR2(128) NOT NULL,
+     TBL_NAME VARCHAR2(256) NOT NULL,
+     TXN_LIST CLOB NULL
+ );
+ 
+ ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME");
+ 
+ -- Table MV_TABLES_USED for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata]
+ CREATE TABLE MV_TABLES_USED
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     TBL_ID NUMBER NOT NULL
+ );
+ 
+ -- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE TABLE PARTITION_EVENTS
+ (
+     PART_NAME_ID NUMBER NOT NULL,
+     CAT_NAME VARCHAR2(256) NULL,
+     DB_NAME VARCHAR2(128) NULL,
+     EVENT_TIME NUMBER NOT NULL,
+     EVENT_TYPE NUMBER (10) NOT NULL,
+     PARTITION_NAME VARCHAR2(767) NULL,
+     TBL_NAME VARCHAR2(256) NULL
+ );
+ 
+ ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+ 
+ -- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+ CREATE TABLE SKEWED_STRING_LIST
+ (
+     STRING_LIST_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+ 
+ CREATE TABLE SKEWED_STRING_LIST_VALUES
+ (
+     STRING_LIST_ID NUMBER NOT NULL,
+     "STRING_LIST_VALUE" VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+ 
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+ 
+ CREATE TABLE SKEWED_COL_NAMES
+ (
+     SD_ID NUMBER NOT NULL,
+     "SKEWED_COL_NAME" VARCHAR2(256) NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+ (
+     SD_ID NUMBER NOT NULL,
+     STRING_LIST_ID_KID NUMBER NOT NULL,
+     "LOCATION" VARCHAR2(4000) NULL
+ );
+ 
+ CREATE TABLE MASTER_KEYS
+ (
+     KEY_ID NUMBER (10) NOT NULL,
+     MASTER_KEY VARCHAR2(767) NULL
+ );
+ 
+ CREATE TABLE DELEGATION_TOKENS
+ (
+     TOKEN_IDENT VARCHAR2(767) NOT NULL,
+     TOKEN VARCHAR2(767) NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE TABLE SKEWED_VALUES
+ (
+     SD_ID_OID NUMBER NOT NULL,
+     STRING_LIST_ID_EID NUMBER NOT NULL,
+     INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED;
+ 
+ -- column statistics
+ 
+ CREATE TABLE TAB_COL_STATS (
+  CS_ID NUMBER NOT NULL,
+  CAT_NAME VARCHAR2(256) NOT NULL,
+  DB_NAME VARCHAR2(128) NOT NULL,
+  TABLE_NAME VARCHAR2(256) NOT NULL,
+  COLUMN_NAME VARCHAR2(767) NOT NULL,
+  COLUMN_TYPE VARCHAR2(128) NOT NULL,
+  TBL_ID NUMBER NOT NULL,
+  LONG_LOW_VALUE NUMBER,
+  LONG_HIGH_VALUE NUMBER,
+  DOUBLE_LOW_VALUE NUMBER,
+  DOUBLE_HIGH_VALUE NUMBER,
+  BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+  BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+  NUM_NULLS NUMBER NOT NULL,
+  NUM_DISTINCTS NUMBER,
+  BIT_VECTOR BLOB,
+  AVG_COL_LEN NUMBER,
+  MAX_COL_LEN NUMBER,
+  NUM_TRUES NUMBER,
+  NUM_FALSES NUMBER,
+  LAST_ANALYZED NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+ 
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS(TBL_ID);
+ 
+ CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME);
+ 
+ CREATE TABLE VERSION (
+   VER_ID NUMBER NOT NULL,
+   SCHEMA_VERSION VARCHAR(127) NOT NULL,
+   VERSION_COMMENT VARCHAR(255)
+ );
+ ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+ 
+ CREATE TABLE PART_COL_STATS (
+  CS_ID NUMBER NOT NULL,
+  CAT_NAME VARCHAR2(256) NOT NULL,
+  DB_NAME VARCHAR2(128) NOT NULL,
+  TABLE_NAME VARCHAR2(256) NOT NULL,
+  PARTITION_NAME VARCHAR2(767) NOT NULL,
+  COLUMN_NAME VARCHAR2(767) NOT NULL,
+  COLUMN_TYPE VARCHAR2(128) NOT NULL,
+  PART_ID NUMBER NOT NULL,
+  LONG_LOW_VALUE NUMBER,
+  LONG_HIGH_VALUE NUMBER,
+  DOUBLE_LOW_VALUE NUMBER,
+  DOUBLE_HIGH_VALUE NUMBER,
+  BIG_DECIMAL_LOW_VALUE VARCHAR2(4000),
+  BIG_DECIMAL_HIGH_VALUE VARCHAR2(4000),
+  NUM_NULLS NUMBER NOT NULL,
+  NUM_DISTINCTS NUMBER,
+  BIT_VECTOR BLOB,
+  AVG_COL_LEN NUMBER,
+  MAX_COL_LEN NUMBER,
+  NUM_TRUES NUMBER,
+  NUM_FALSES NUMBER,
+  LAST_ANALYZED NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PKEY PRIMARY KEY (CS_ID);
+ 
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED;
+ 
+ CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+ 
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+ 
+ CREATE TABLE FUNCS (
+   FUNC_ID NUMBER NOT NULL,
+   CLASS_NAME VARCHAR2(4000),
+   CREATE_TIME NUMBER(10) NOT NULL,
+   DB_ID NUMBER,
+   FUNC_NAME VARCHAR2(128),
+   FUNC_TYPE NUMBER(10) NOT NULL,
+   OWNER_NAME VARCHAR2(128),
+   OWNER_TYPE VARCHAR2(10)
+ );
+ 
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+ 
+ CREATE TABLE FUNC_RU (
+   FUNC_ID NUMBER NOT NULL,
+   RESOURCE_TYPE NUMBER(10) NOT NULL,
+   RESOURCE_URI VARCHAR2(4000),
+   INTEGER_IDX NUMBER(10) NOT NULL
+ );
+ 
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID, INTEGER_IDX);
+ 
+ CREATE TABLE NOTIFICATION_LOG
+ (
+     NL_ID NUMBER NOT NULL,
+     EVENT_ID NUMBER NOT NULL,
+     EVENT_TIME NUMBER(10) NOT NULL,
+     EVENT_TYPE VARCHAR2(32) NOT NULL,
+     CAT_NAME VARCHAR2(256),
+     DB_NAME VARCHAR2(128),
+     TBL_NAME VARCHAR2(256),
+     MESSAGE CLOB NULL,
+     MESSAGE_FORMAT VARCHAR(16) NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+ 
+ CREATE TABLE NOTIFICATION_SEQUENCE
+ (
+     NNI_ID NUMBER NOT NULL,
+     NEXT_EVENT_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+ 
+ INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE);
+ 
+ -- Tables to manage resource plans.
+ 
+ CREATE TABLE WM_RESOURCEPLAN
+ (
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     QUERY_PARALLELISM NUMBER(10),
+     STATUS VARCHAR2(20) NOT NULL,
+     DEFAULT_POOL_ID NUMBER
+ );
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+ 
+ CREATE TABLE WM_POOL
+ (
+     POOL_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     PATH VARCHAR2(1024) NOT NULL,
+     ALLOC_FRACTION NUMBER,
+     QUERY_PARALLELISM NUMBER(10),
+     SCHEDULING_POLICY VARCHAR2(1024)
+ );
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+ 
+ CREATE TABLE WM_TRIGGER
+ (
+     TRIGGER_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     TRIGGER_EXPRESSION VARCHAR2(1024),
+     ACTION_EXPRESSION VARCHAR2(1024),
+     IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0))
+ );
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+ 
+ CREATE TABLE WM_POOL_TO_TRIGGER
+ (
+     POOL_ID NUMBER NOT NULL,
+     TRIGGER_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+ 
+ CREATE TABLE WM_MAPPING
+ (
+     MAPPING_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     ENTITY_TYPE VARCHAR2(128) NOT NULL,
+     ENTITY_NAME VARCHAR2(128) NOT NULL,
+     POOL_ID NUMBER NOT NULL,
+     ORDERING NUMBER(10)
+ );
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+ 
+ -- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+ 
+ CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table COLUMNS_V2
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+ 
+ 
+ -- Constraints for table PARTITION_KEY_VALS
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+ 
+ 
+ -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME);
+ 
+ 
+ -- Constraints for table PARTITION_PARAMS
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+ 
+ 
+ -- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ 
+ -- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE UNIQUE INDEX UNIQUE_TYPE ON TYPES (TYPE_NAME);
+ 
+ 
+ -- Constraints for table PARTITION_KEYS
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+ 
+ 
+ -- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+ 
+ 
+ -- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+ 
+ CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+ 
+ 
+ -- Constraints for table INDEX_PARAMS
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+ 
+ 
+ -- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (AUTHORIZER,TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+ 
+ 
+ -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+ 
+ CREATE INDEX IDXS_N50 ON IDXS (INDEX_TBL_ID);
+ 
+ CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+ 
+ CREATE INDEX IDXS_N49 ON IDXS (ORIG_TBL_ID);
+ 
+ 
+ -- Constraints for table BUCKETING_COLS
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table TYPE_FIELDS
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+ 
+ 
+ -- Constraints for table SD_PARAMS
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+ 
+ 
+ -- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (AUTHORIZER,PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+ CREATE INDEX SDS_N50 ON SDS (CD_ID);
+ 
+ 
+ -- Constraints for table TABLE_PARAMS
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+ 
+ 
+ -- Constraints for table SORT_COLS
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+ 
+ CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (AUTHORIZER,TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table DATABASE_PARAMS
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+ 
+ 
+ -- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+ 
+ CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table SERDE_PARAMS
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+ 
+ 
+ -- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (AUTHORIZER,PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+ 
+ 
+ -- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+ 
+ CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (AUTHORIZER,DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+ 
+ 
+ -- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED ;
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) INITIALLY DEFERRED ;
+ 
+ CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+ 
+ CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+ 
+ 
+ -- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+ 
+ 
+ -- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) INITIALLY DEFERRED;
+ 
+ CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME, DB_ID);
+ 
+ CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+ 
+ 
+ -- Constraints for table FUNC_RU for class(es) [org.apache.hadoop.hive.metastore.model.MFunctions]
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) INITIALLY DEFERRED;
+ 
+ CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+ 
+ CREATE TABLE KEY_CONSTRAINTS
+ (
+   CHILD_CD_ID NUMBER,
+   CHILD_INTEGER_IDX NUMBER,
+   CHILD_TBL_ID NUMBER,
+   PARENT_CD_ID NUMBER,
+   PARENT_INTEGER_IDX NUMBER NOT NULL,
+   PARENT_TBL_ID NUMBER NOT NULL,
+   POSITION NUMBER NOT NULL,
+   CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+   CONSTRAINT_TYPE NUMBER NOT NULL,
+   UPDATE_RULE NUMBER,
+   DELETE_RULE NUMBER,
+   ENABLE_VALIDATE_RELY NUMBER NOT NULL,
+   DEFAULT_VALUE VARCHAR(400)
+ ) ;
+ 
+ ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+ 
+ CREATE INDEX CONSTRAINTS_PT_INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+ 
+ CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+ 
+ -- Table for METASTORE_DB_PROPERTIES and its constraints
+ CREATE TABLE METASTORE_DB_PROPERTIES
+ (
+   PROPERTY_KEY VARCHAR(255) NOT NULL,
+   PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+   DESCRIPTION VARCHAR(1000)
+ );
+ 
+ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+ 
+ -- Constraints for resource plan tables.
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK1 FOREIGN KEY (MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID);
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID);
+ 
+ ------------------------------
+ -- Transaction and lock tables
+ ------------------------------
+ CREATE TABLE TXNS (
+   TXN_ID NUMBER(19) PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED NUMBER(19) NOT NULL,
+   TXN_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar2(128),
+   TXN_META_INFO varchar2(128),
+   TXN_HEARTBEAT_COUNT number(10),
+   TXN_TYPE number(10)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID NUMBER(19) NOT NULL REFERENCES TXNS (TXN_ID),
+   TC_DATABASE VARCHAR2(128) NOT NULL,
+   TC_TABLE VARCHAR2(128),
+   TC_PARTITION VARCHAR2(767) NULL,
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID NUMBER(19)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID NUMBER(19) NOT NULL,
+   CTC_DATABASE VARCHAR2(128) NOT NULL,
+   CTC_TABLE VARCHAR2(256),
+   CTC_PARTITION VARCHAR2(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID NUMBER(19)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT NUMBER(19) NOT NULL
+ );
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID NUMBER(19) NOT NULL,
+   HL_LOCK_INT_ID NUMBER(19) NOT NULL,
+   HL_TXNID NUMBER(19) NOT NULL,
+   HL_DB VARCHAR2(128) NOT NULL,
+   HL_TABLE VARCHAR2(128),
+   HL_PARTITION VARCHAR2(767),
+   HL_LOCK_STATE CHAR(1) NOT NULL,
+   HL_LOCK_TYPE CHAR(1) NOT NULL,
+   HL_LAST_HEARTBEAT NUMBER(19) NOT NULL,
+   HL_ACQUIRED_AT NUMBER(19),
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT number(10),
+   HL_AGENT_INFO varchar2(128),
+   HL_BLOCKEDBY_EXT_ID number(19),
+   HL_BLOCKEDBY_INT_ID number(19),
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT NUMBER(19) NOT NULL
+ );
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID NUMBER(19) PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START NUMBER(19),
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID NUMBER(19),
+   CQ_META_INFO BLOB,
+   CQ_HADOOP_JOB_ID varchar2(32)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT NUMBER(19) NOT NULL
+ );
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID NUMBER(19) PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START NUMBER(19),
+   CC_END NUMBER(19),
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID NUMBER(19),
+   CC_META_INFO BLOB,
+   CC_HADOOP_JOB_ID varchar2(32)
+ ) ROWDEPENDENCIES;
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar2(128) NOT NULL,
+   MT_KEY2 number(19) NOT NULL,
+   MT_COMMENT varchar2(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ );
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar2(128) NOT NULL,
+   WS_TABLE varchar2(128) NOT NULL,
+   WS_PARTITION varchar2(767),
+   WS_TXNID number(19) NOT NULL,
+   WS_COMMIT_ID number(19) NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID NUMBER(19) NOT NULL,
+   T2W_DATABASE VARCHAR2(128) NOT NULL,
+   T2W_TABLE VARCHAR2(256) NOT NULL,
+   T2W_WRITEID NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE VARCHAR2(128) NOT NULL,
+   NWI_TABLE VARCHAR2(256) NOT NULL,
+   NWI_NEXT NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID NUMBER(19) NOT NULL,
+   MHL_MIN_OPEN_TXNID NUMBER(19) NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" number primary key,
+   "SCHEMA_TYPE" number not null,
+   "NAME" varchar2(256) unique,
+   "DB_ID" number references "DBS" ("DB_ID"),
+   "COMPATIBILITY" number not null,
+   "VALIDATION_LEVEL" number not null,
+   "CAN_EVOLVE" number(1) not null,
+   "SCHEMA_GROUP" varchar2(256),
+   "DESCRIPTION" varchar2(4000)
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" number primary key,
+   "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" number not null,
+   "CREATED_AT" number not null,
+   "CD_ID" number references "CDS" ("CD_ID"), 
+   "STATE" number not null,
+   "DESCRIPTION" varchar2(4000),
+   "SCHEMA_TEXT" clob,
+   "FINGERPRINT" varchar2(256),
+   "SCHEMA_VERSION_NAME" varchar2(256),
+   "SERDE_ID" number references "SERDES" ("SERDE_ID"), 
+   UNIQUE ("SCHEMA_ID", "VERSION")
+ );
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID number(19) NOT NULL,
+   RTM_TARGET_TXN_ID number(19) NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID NUMBER primary key,
+   CREATE_TIME NUMBER(10) NOT NULL,
+   WEIGHT NUMBER(10) NOT NULL,
+   PAYLOAD BLOB
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID number(19) NOT NULL,
+   WNL_TXNID number(19) NOT NULL,
+   WNL_WRITEID number(19) NOT NULL,
+   WNL_DATABASE varchar(128) NOT NULL,
+   WNL_TABLE varchar(128) NOT NULL,
+   WNL_PARTITION varchar(1024) NOT NULL,
+   WNL_TABLE_OBJ clob NOT NULL,
+   WNL_PARTITION_OBJ clob,
+   WNL_FILES clob,
+   WNL_EVENT_TIME number(10) NOT NULL,
+   PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
+ );
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');
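
For reference, a minimal post-install sanity check (a sketch, not part of the committed script; Hive's schematool reads this same VERSION table when validating a schema) is to query back the version the final INSERT just recorded:

    -- Hypothetical check: confirm the recorded schema version.
    SELECT SCHEMA_VERSION, VERSION_COMMENT FROM VERSION WHERE VER_ID = 1;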

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
index 0000000,71f5034..c9c6b30
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-2.3.0-to-3.0.0.oracle.sql
@@@ -1,0 -1,342 +1,343 @@@
+ SELECT 'Upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status from dual;
+ 
+ --@041-HIVE-16556.oracle.sql;
+ CREATE TABLE METASTORE_DB_PROPERTIES
+ (
+   PROPERTY_KEY VARCHAR(255) NOT NULL,
+   PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+   DESCRIPTION VARCHAR(1000)
+ );
+ 
+ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
+ 
+ --@042-HIVE-16575.oracle.sql;
+ CREATE INDEX CONSTRAINTS_CT_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+ 
+ --@043-HIVE-16922.oracle.sql;
+ UPDATE SERDE_PARAMS
+ SET PARAM_KEY='collection.delim'
+ WHERE PARAM_KEY='colelction.delim';
+ 
+ --@044-HIVE-16997.oracle.sql;
+ ALTER TABLE PART_COL_STATS ADD BIT_VECTOR BLOB NULL;
+ ALTER TABLE TAB_COL_STATS ADD BIT_VECTOR BLOB NULL;
+ 
+ --@045-HIVE-16886.oracle.sql;
+ INSERT INTO NOTIFICATION_SEQUENCE (NNI_ID, NEXT_EVENT_ID) SELECT 1,1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_EVENT_ID FROM NOTIFICATION_SEQUENCE);
+ 
+ --@046-HIVE-17566.oracle.sql;
+ CREATE TABLE WM_RESOURCEPLAN
+ (
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     QUERY_PARALLELISM NUMBER(10),
+     STATUS VARCHAR2(20) NOT NULL,
+     DEFAULT_POOL_ID NUMBER
+ );
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+ 
+ 
+ CREATE TABLE WM_POOL
+ (
+     POOL_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     PATH VARCHAR2(1024) NOT NULL,
+     ALLOC_FRACTION NUMBER,
+     QUERY_PARALLELISM NUMBER(10),
+     SCHEDULING_POLICY VARCHAR2(1024)
+ );
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ 
+ CREATE TABLE WM_TRIGGER
+ (
+     TRIGGER_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     "NAME" VARCHAR2(128) NOT NULL,
+     TRIGGER_EXPRESSION VARCHAR2(1024),
+     ACTION_EXPRESSION VARCHAR2(1024),
+     IS_IN_UNMANAGED NUMBER(1) DEFAULT 0 NOT NULL CHECK (IS_IN_UNMANAGED IN (1,0))
+ );
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ 
+ CREATE TABLE WM_POOL_TO_TRIGGER
+ (
+     POOL_ID NUMBER NOT NULL,
+     TRIGGER_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+ 
+ 
+ CREATE TABLE WM_MAPPING
+ (
+     MAPPING_ID NUMBER NOT NULL,
+     RP_ID NUMBER NOT NULL,
+     ENTITY_TYPE VARCHAR2(128) NOT NULL,
+     ENTITY_NAME VARCHAR2(128) NOT NULL,
+     POOL_ID NUMBER NOT NULL,
+     ORDERING NUMBER(10)
+ );
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK2 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ -- Upgrades for Schema Registry objects
+ ALTER TABLE "SERDES" ADD "DESCRIPTION" VARCHAR(4000);
+ ALTER TABLE "SERDES" ADD "SERIALIZER_CLASS" VARCHAR(4000);
+ ALTER TABLE "SERDES" ADD "DESERIALIZER_CLASS" VARCHAR(4000);
+ ALTER TABLE "SERDES" ADD "SERDE_TYPE" INTEGER;
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" number primary key,
+   "SCHEMA_TYPE" number not null,
+   "NAME" varchar2(256) unique,
+   "DB_ID" number references "DBS" ("DB_ID"),
+   "COMPATIBILITY" number not null,
+   "VALIDATION_LEVEL" number not null,
+   "CAN_EVOLVE" number(1) not null,
+   "SCHEMA_GROUP" varchar2(256),
+   "DESCRIPTION" varchar2(4000)
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" number primary key,
+   "SCHEMA_ID" number references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" number not null,
+   "CREATED_AT" number not null,
+   "CD_ID" number references "CDS" ("CD_ID"), 
+   "STATE" number not null,
+   "DESCRIPTION" varchar2(4000),
+   "SCHEMA_TEXT" clob,
+   "FINGERPRINT" varchar2(256),
+   "SCHEMA_VERSION_NAME" varchar2(256),
+   "SERDE_ID" number references "SERDES" ("SERDE_ID"), 
+   UNIQUE ("SCHEMA_ID", "VERSION")
+ );
+ 
+ 
+ -- 048-HIVE-14498
+ CREATE TABLE MV_CREATION_METADATA
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     CAT_NAME VARCHAR2(256) NOT NULL,
+     DB_NAME VARCHAR2(128) NOT NULL,
+     TBL_NAME VARCHAR2(256) NOT NULL,
+     TXN_LIST CLOB NULL
+ );
+ 
+ ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_TABLE ON MV_CREATION_METADATA ("DB_NAME", "TBL_NAME");
+ 
+ CREATE TABLE MV_TABLES_USED
+ (
+     MV_CREATION_METADATA_ID NUMBER NOT NULL,
+     TBL_ID NUMBER NOT NULL
+ );
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK1 FOREIGN KEY (MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID);
+ 
+ ALTER TABLE MV_TABLES_USED ADD CONSTRAINT MV_TABLES_USED_FK2 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID);
+ 
+ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_TIMESTAMP timestamp NULL;
+ 
+ UPDATE COMPLETED_TXN_COMPONENTS SET CTC_TIMESTAMP = CURRENT_TIMESTAMP;
+ 
+ ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY(CTC_TIMESTAMP DEFAULT CURRENT_TIMESTAMP);
+ 
+ ALTER TABLE COMPLETED_TXN_COMPONENTS MODIFY(CTC_TIMESTAMP NOT NULL);
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ -- 049-HIVE-18489
+ UPDATE FUNC_RU
+   SET RESOURCE_URI = 's3a' || SUBSTR(RESOURCE_URI, 4)
+   WHERE RESOURCE_URI LIKE 's3n://%' ;
+ 
+ UPDATE SKEWED_COL_VALUE_LOC_MAP
+   SET LOCATION = 's3a' || SUBSTR(LOCATION, 4)
+   WHERE LOCATION LIKE 's3n://%' ;
+ 
+ UPDATE SDS
+   SET LOCATION = 's3a' || SUBSTR(LOCATION, 4)
+   WHERE LOCATION LIKE 's3n://%' ;
+ 
+ UPDATE DBS
+   SET DB_LOCATION_URI = 's3a' || SUBSTR(DB_LOCATION_URI, 4)
+   WHERE DB_LOCATION_URI LIKE 's3n://%' ;
+ 
+ -- HIVE-18192
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID NUMBER(19) NOT NULL,
+   T2W_DATABASE VARCHAR2(128) NOT NULL,
+   T2W_TABLE VARCHAR2(256) NOT NULL,
+   T2W_WRITEID NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE VARCHAR2(128) NOT NULL,
+   NWI_TABLE VARCHAR2(256) NOT NULL,
+   NWI_NEXT NUMBER(19) NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ ALTER TABLE COMPACTION_QUEUE RENAME COLUMN CQ_HIGHEST_TXN_ID TO CQ_HIGHEST_WRITE_ID;
+ 
+ ALTER TABLE COMPLETED_COMPACTIONS RENAME COLUMN CC_HIGHEST_TXN_ID TO CC_HIGHEST_WRITE_ID;
+ 
+ -- Modify txn_components/completed_txn_components tables to add write id.
+ ALTER TABLE TXN_COMPONENTS ADD TC_WRITEID number(19);
+ ALTER TABLE COMPLETED_TXN_COMPONENTS ADD CTC_WRITEID number(19);
+ 
+ -- HIVE-18726
+ -- Add a new column to store the default value for DEFAULT constraints
+ ALTER TABLE KEY_CONSTRAINTS ADD DEFAULT_VALUE VARCHAR(400);
+ ALTER TABLE KEY_CONSTRAINTS MODIFY (PARENT_CD_ID NULL);
+ 
+ ALTER TABLE HIVE_LOCKS MODIFY(HL_TXNID NOT NULL);
+ 
+ -- HIVE-18755, add catalogs
+ -- new catalogs table
+ CREATE TABLE CTLGS (
+     CTLG_ID NUMBER PRIMARY KEY,
+     "NAME" VARCHAR2(256),
+     "DESC" VARCHAR2(4000),
+     LOCATION_URI VARCHAR2(4000) NOT NULL,
+     UNIQUE ("NAME")
+ );
+ 
+ -- Insert a default value.  The location is TBD.  Hive will fix this when it starts
+ INSERT INTO CTLGS VALUES (1, 'hive', 'Default catalog for Hive', 'TBD');
+ 
+ -- Drop the unique index on DBS
+ DROP INDEX UNIQUE_DATABASE;
+ 
+ -- Add the new column to the DBS table, can't put in the not null constraint yet
+ ALTER TABLE DBS ADD CTLG_NAME VARCHAR2(256);
+ 
+ -- Update all records in the DBS table to point to the Hive catalog
+ UPDATE DBS 
+   SET "CTLG_NAME" = 'hive';
+ 
+ -- Add the not null constraint
+ ALTER TABLE DBS MODIFY CTLG_NAME NOT NULL;
+ 
+ -- Put back the unique index 
+ CREATE UNIQUE INDEX UNIQUE_DATABASE ON DBS ("NAME", CTLG_NAME);
+ 
+ -- Add the foreign key
+ ALTER TABLE DBS ADD CONSTRAINT CTLGS_FK FOREIGN KEY (CTLG_NAME) REFERENCES CTLGS ("NAME") INITIALLY DEFERRED;
+ 
+ -- Add columns to table stats and part stats
+ ALTER TABLE TAB_COL_STATS ADD CAT_NAME VARCHAR2(256);
+ ALTER TABLE PART_COL_STATS ADD CAT_NAME VARCHAR2(256);
+ 
+ -- Set the existing column names to Hive
+ UPDATE TAB_COL_STATS
+   SET CAT_NAME = 'hive';
+ UPDATE PART_COL_STATS
+   SET CAT_NAME = 'hive';
+ 
+ -- Add the not null constraint
+ ALTER TABLE TAB_COL_STATS MODIFY CAT_NAME NOT NULL;
+ ALTER TABLE PART_COL_STATS MODIFY CAT_NAME NOT NULL;
+ 
+ -- Rebuild the part column stats index to include the new CAT_NAME column. Note that no analogous index exists for table stats.
+ DROP INDEX PCS_STATS_IDX;
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+ 
+ -- Add column to partition events
+ ALTER TABLE PARTITION_EVENTS ADD CAT_NAME VARCHAR2(256);
+ UPDATE PARTITION_EVENTS
+   SET CAT_NAME = 'hive' WHERE DB_NAME IS NOT NULL;
+ 
+ -- Add column to notification log
+ ALTER TABLE NOTIFICATION_LOG ADD CAT_NAME VARCHAR2(256);
+ UPDATE NOTIFICATION_LOG
+   SET CAT_NAME = 'hive' WHERE DB_NAME IS NOT NULL;
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID number(19) NOT NULL,
+   RTM_TARGET_TXN_ID number(19) NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) SELECT 'org.apache.hadoop.hive.metastore.model.MNotificationLog',1 FROM DUAL WHERE NOT EXISTS ( SELECT NEXT_VAL FROM SEQUENCE_TABLE WHERE SEQUENCE_NAME = 'org.apache.hadoop.hive.metastore.model.MNotificationLog');
+ 
+ -- HIVE-18747
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID number(19) NOT NULL,
+   MHL_MIN_OPEN_TXNID number(19) NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID NUMBER primary key,
+   CREATE_TIME NUMBER(10) NOT NULL,
+   WEIGHT NUMBER(10) NOT NULL,
+   PAYLOAD BLOB
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ -- HIVE-18193
+ -- Populate NEXT_WRITE_ID for each Transactional table and set next write ID same as next txn ID
+ INSERT INTO NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE, NWI_NEXT)
+     SELECT * FROM
+         (SELECT DB.NAME, TBL_INFO.TBL_NAME FROM DBS DB,
+             (SELECT TBL.DB_ID, TBL.TBL_NAME FROM TBLS TBL,
+                 (SELECT TBL_ID FROM TABLE_PARAMS WHERE PARAM_KEY='transactional' AND to_char(PARAM_VALUE)='true') TBL_PARAM
+             WHERE TBL.TBL_ID=TBL_PARAM.TBL_ID) TBL_INFO
+         where DB.DB_ID=TBL_INFO.DB_ID) DB_TBL_NAME,
+         (SELECT NTXN_NEXT FROM NEXT_TXN_ID) NEXT_WRITE;
+ 
+ -- Populate TXN_TO_WRITE_ID for each aborted/open txns and set write ID equal to txn ID
+ INSERT INTO TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID, T2W_WRITEID)
+     SELECT * FROM
+         (SELECT DB.NAME, TBL_INFO.TBL_NAME FROM DBS DB,
+             (SELECT TBL.DB_ID, TBL.TBL_NAME FROM TBLS TBL,
+                 (SELECT TBL_ID FROM TABLE_PARAMS WHERE PARAM_KEY='transactional' AND to_char(PARAM_VALUE)='true') TBL_PARAM
+             WHERE TBL.TBL_ID=TBL_PARAM.TBL_ID) TBL_INFO
+         where DB.DB_ID=TBL_INFO.DB_ID) DB_TBL_NAME,
+         (SELECT TXN_ID, TXN_ID as WRITE_ID FROM TXNS) TXN_INFO;
+ 
+ -- Update TXN_COMPONENTS and COMPLETED_TXN_COMPONENTS for write ID which is same as txn ID
+ UPDATE TXN_COMPONENTS SET TC_WRITEID = TC_TXNID;
+ UPDATE COMPLETED_TXN_COMPONENTS SET CTC_WRITEID = CTC_TXNID;
+ 
+ ALTER TABLE TBLS ADD OWNER_TYPE VARCHAR2(10) NULL;
+ 
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='3.0.0', VERSION_COMMENT='Hive release version 3.0.0' WHERE VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 2.3.0 to 3.0.0' AS Status FROM DUAL;
++

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
index 0000000,6fa5e2d..c94e6ec
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/oracle/upgrade-3.1.0-to-4.0.0.oracle.sql
@@@ -1,0 -1,6 +1,9 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status FROM DUAL;
+ 
++ALTER TABLE TBLS ADD WRITE_ID number NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID number NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' WHERE VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS Status FROM DUAL;
+ 


[90/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/pom.xml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
index 0000000,dd3a127..56e5043
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
@@@ -1,0 -1,955 +1,1162 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddPartitionsRequest implements org.apache.thrift.TBase<AddPartitionsRequest, AddPartitionsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AddPartitionsRequest> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsRequest");
+ 
+   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+   private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+   private static final org.apache.thrift.protocol.TField PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("parts", org.apache.thrift.protocol.TType.LIST, (short)3);
+   private static final org.apache.thrift.protocol.TField IF_NOT_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifNotExists", org.apache.thrift.protocol.TType.BOOL, (short)4);
+   private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)5);
+   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
++  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)7);
++  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)8);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new AddPartitionsRequestStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new AddPartitionsRequestTupleSchemeFactory());
+   }
+ 
+   private String dbName; // required
+   private String tblName; // required
+   private List<Partition> parts; // required
+   private boolean ifNotExists; // required
+   private boolean needResult; // optional
+   private String catName; // optional
++  private long txnId; // optional
++  private String validWriteIdList; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     DB_NAME((short)1, "dbName"),
+     TBL_NAME((short)2, "tblName"),
+     PARTS((short)3, "parts"),
+     IF_NOT_EXISTS((short)4, "ifNotExists"),
+     NEED_RESULT((short)5, "needResult"),
 -    CAT_NAME((short)6, "catName");
++    CAT_NAME((short)6, "catName"),
++    TXN_ID((short)7, "txnId"),
++    VALID_WRITE_ID_LIST((short)8, "validWriteIdList");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it is not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // DB_NAME
+           return DB_NAME;
+         case 2: // TBL_NAME
+           return TBL_NAME;
+         case 3: // PARTS
+           return PARTS;
+         case 4: // IF_NOT_EXISTS
+           return IF_NOT_EXISTS;
+         case 5: // NEED_RESULT
+           return NEED_RESULT;
+         case 6: // CAT_NAME
+           return CAT_NAME;
++        case 7: // TXN_ID
++          return TXN_ID;
++        case 8: // VALID_WRITE_ID_LIST
++          return VALID_WRITE_ID_LIST;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it is not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
+   private static final int __IFNOTEXISTS_ISSET_ID = 0;
+   private static final int __NEEDRESULT_ISSET_ID = 1;
++  private static final int __TXNID_ISSET_ID = 2;
+   private byte __isset_bitfield = 0;
 -  private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME};
++  private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.PARTS, new org.apache.thrift.meta_data.FieldMetaData("parts", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
+     tmpMap.put(_Fields.IF_NOT_EXISTS, new org.apache.thrift.meta_data.FieldMetaData("ifNotExists", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     tmpMap.put(_Fields.NEED_RESULT, new org.apache.thrift.meta_data.FieldMetaData("needResult", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsRequest.class, metaDataMap);
+   }
+ 
+   public AddPartitionsRequest() {
+     this.needResult = true;
+ 
++    this.txnId = -1L;
++
+   }
+ 
+   public AddPartitionsRequest(
+     String dbName,
+     String tblName,
+     List<Partition> parts,
+     boolean ifNotExists)
+   {
+     this();
+     this.dbName = dbName;
+     this.tblName = tblName;
+     this.parts = parts;
+     this.ifNotExists = ifNotExists;
+     setIfNotExistsIsSet(true);
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public AddPartitionsRequest(AddPartitionsRequest other) {
+     __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetDbName()) {
+       this.dbName = other.dbName;
+     }
+     if (other.isSetTblName()) {
+       this.tblName = other.tblName;
+     }
+     if (other.isSetParts()) {
+       List<Partition> __this__parts = new ArrayList<Partition>(other.parts.size());
+       for (Partition other_element : other.parts) {
+         __this__parts.add(new Partition(other_element));
+       }
+       this.parts = __this__parts;
+     }
+     this.ifNotExists = other.ifNotExists;
+     this.needResult = other.needResult;
+     if (other.isSetCatName()) {
+       this.catName = other.catName;
+     }
++    this.txnId = other.txnId;
++    if (other.isSetValidWriteIdList()) {
++      this.validWriteIdList = other.validWriteIdList;
++    }
+   }
+ 
+   public AddPartitionsRequest deepCopy() {
+     return new AddPartitionsRequest(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.dbName = null;
+     this.tblName = null;
+     this.parts = null;
+     setIfNotExistsIsSet(false);
+     this.ifNotExists = false;
+     this.needResult = true;
+ 
+     this.catName = null;
++    this.txnId = -1L;
++
++    this.validWriteIdList = null;
+   }
+ 
+   public String getDbName() {
+     return this.dbName;
+   }
+ 
+   public void setDbName(String dbName) {
+     this.dbName = dbName;
+   }
+ 
+   public void unsetDbName() {
+     this.dbName = null;
+   }
+ 
+   /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+   public boolean isSetDbName() {
+     return this.dbName != null;
+   }
+ 
+   public void setDbNameIsSet(boolean value) {
+     if (!value) {
+       this.dbName = null;
+     }
+   }
+ 
+   public String getTblName() {
+     return this.tblName;
+   }
+ 
+   public void setTblName(String tblName) {
+     this.tblName = tblName;
+   }
+ 
+   public void unsetTblName() {
+     this.tblName = null;
+   }
+ 
+   /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+   public boolean isSetTblName() {
+     return this.tblName != null;
+   }
+ 
+   public void setTblNameIsSet(boolean value) {
+     if (!value) {
+       this.tblName = null;
+     }
+   }
+ 
+   public int getPartsSize() {
+     return (this.parts == null) ? 0 : this.parts.size();
+   }
+ 
+   public java.util.Iterator<Partition> getPartsIterator() {
+     return (this.parts == null) ? null : this.parts.iterator();
+   }
+ 
+   public void addToParts(Partition elem) {
+     if (this.parts == null) {
+       this.parts = new ArrayList<Partition>();
+     }
+     this.parts.add(elem);
+   }
+ 
+   public List<Partition> getParts() {
+     return this.parts;
+   }
+ 
+   public void setParts(List<Partition> parts) {
+     this.parts = parts;
+   }
+ 
+   public void unsetParts() {
+     this.parts = null;
+   }
+ 
+   /** Returns true if field parts is set (has been assigned a value) and false otherwise */
+   public boolean isSetParts() {
+     return this.parts != null;
+   }
+ 
+   public void setPartsIsSet(boolean value) {
+     if (!value) {
+       this.parts = null;
+     }
+   }
+ 
+   public boolean isIfNotExists() {
+     return this.ifNotExists;
+   }
+ 
+   public void setIfNotExists(boolean ifNotExists) {
+     this.ifNotExists = ifNotExists;
+     setIfNotExistsIsSet(true);
+   }
+ 
+   public void unsetIfNotExists() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __IFNOTEXISTS_ISSET_ID);
+   }
+ 
+   /** Returns true if field ifNotExists is set (has been assigned a value) and false otherwise */
+   public boolean isSetIfNotExists() {
+     return EncodingUtils.testBit(__isset_bitfield, __IFNOTEXISTS_ISSET_ID);
+   }
+ 
+   public void setIfNotExistsIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __IFNOTEXISTS_ISSET_ID, value);
+   }
+ 
+   public boolean isNeedResult() {
+     return this.needResult;
+   }
+ 
+   public void setNeedResult(boolean needResult) {
+     this.needResult = needResult;
+     setNeedResultIsSet(true);
+   }
+ 
+   public void unsetNeedResult() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NEEDRESULT_ISSET_ID);
+   }
+ 
+   /** Returns true if field needResult is set (has been assigned a value) and false otherwise */
+   public boolean isSetNeedResult() {
+     return EncodingUtils.testBit(__isset_bitfield, __NEEDRESULT_ISSET_ID);
+   }
+ 
+   public void setNeedResultIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDRESULT_ISSET_ID, value);
+   }
+ 
+   public String getCatName() {
+     return this.catName;
+   }
+ 
+   public void setCatName(String catName) {
+     this.catName = catName;
+   }
+ 
+   public void unsetCatName() {
+     this.catName = null;
+   }
+ 
+   /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+   public boolean isSetCatName() {
+     return this.catName != null;
+   }
+ 
+   public void setCatNameIsSet(boolean value) {
+     if (!value) {
+       this.catName = null;
+     }
+   }
+ 
++  public long getTxnId() {
++    return this.txnId;
++  }
++
++  public void setTxnId(long txnId) {
++    this.txnId = txnId;
++    setTxnIdIsSet(true);
++  }
++
++  public void unsetTxnId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
++  public boolean isSetTxnId() {
++    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  public void setTxnIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
++  }
++
++  public String getValidWriteIdList() {
++    return this.validWriteIdList;
++  }
++
++  public void setValidWriteIdList(String validWriteIdList) {
++    this.validWriteIdList = validWriteIdList;
++  }
++
++  public void unsetValidWriteIdList() {
++    this.validWriteIdList = null;
++  }
++
++  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
++  public boolean isSetValidWriteIdList() {
++    return this.validWriteIdList != null;
++  }
++
++  public void setValidWriteIdListIsSet(boolean value) {
++    if (!value) {
++      this.validWriteIdList = null;
++    }
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case DB_NAME:
+       if (value == null) {
+         unsetDbName();
+       } else {
+         setDbName((String)value);
+       }
+       break;
+ 
+     case TBL_NAME:
+       if (value == null) {
+         unsetTblName();
+       } else {
+         setTblName((String)value);
+       }
+       break;
+ 
+     case PARTS:
+       if (value == null) {
+         unsetParts();
+       } else {
+         setParts((List<Partition>)value);
+       }
+       break;
+ 
+     case IF_NOT_EXISTS:
+       if (value == null) {
+         unsetIfNotExists();
+       } else {
+         setIfNotExists((Boolean)value);
+       }
+       break;
+ 
+     case NEED_RESULT:
+       if (value == null) {
+         unsetNeedResult();
+       } else {
+         setNeedResult((Boolean)value);
+       }
+       break;
+ 
+     case CAT_NAME:
+       if (value == null) {
+         unsetCatName();
+       } else {
+         setCatName((String)value);
+       }
+       break;
+ 
++    case TXN_ID:
++      if (value == null) {
++        unsetTxnId();
++      } else {
++        setTxnId((Long)value);
++      }
++      break;
++
++    case VALID_WRITE_ID_LIST:
++      if (value == null) {
++        unsetValidWriteIdList();
++      } else {
++        setValidWriteIdList((String)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case DB_NAME:
+       return getDbName();
+ 
+     case TBL_NAME:
+       return getTblName();
+ 
+     case PARTS:
+       return getParts();
+ 
+     case IF_NOT_EXISTS:
+       return isIfNotExists();
+ 
+     case NEED_RESULT:
+       return isNeedResult();
+ 
+     case CAT_NAME:
+       return getCatName();
+ 
++    case TXN_ID:
++      return getTxnId();
++
++    case VALID_WRITE_ID_LIST:
++      return getValidWriteIdList();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case DB_NAME:
+       return isSetDbName();
+     case TBL_NAME:
+       return isSetTblName();
+     case PARTS:
+       return isSetParts();
+     case IF_NOT_EXISTS:
+       return isSetIfNotExists();
+     case NEED_RESULT:
+       return isSetNeedResult();
+     case CAT_NAME:
+       return isSetCatName();
++    case TXN_ID:
++      return isSetTxnId();
++    case VALID_WRITE_ID_LIST:
++      return isSetValidWriteIdList();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof AddPartitionsRequest)
+       return this.equals((AddPartitionsRequest)that);
+     return false;
+   }
+ 
+   public boolean equals(AddPartitionsRequest that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_dbName = true && this.isSetDbName();
+     boolean that_present_dbName = true && that.isSetDbName();
+     if (this_present_dbName || that_present_dbName) {
+       if (!(this_present_dbName && that_present_dbName))
+         return false;
+       if (!this.dbName.equals(that.dbName))
+         return false;
+     }
+ 
+     boolean this_present_tblName = true && this.isSetTblName();
+     boolean that_present_tblName = true && that.isSetTblName();
+     if (this_present_tblName || that_present_tblName) {
+       if (!(this_present_tblName && that_present_tblName))
+         return false;
+       if (!this.tblName.equals(that.tblName))
+         return false;
+     }
+ 
+     boolean this_present_parts = true && this.isSetParts();
+     boolean that_present_parts = true && that.isSetParts();
+     if (this_present_parts || that_present_parts) {
+       if (!(this_present_parts && that_present_parts))
+         return false;
+       if (!this.parts.equals(that.parts))
+         return false;
+     }
+ 
+     boolean this_present_ifNotExists = true;
+     boolean that_present_ifNotExists = true;
+     if (this_present_ifNotExists || that_present_ifNotExists) {
+       if (!(this_present_ifNotExists && that_present_ifNotExists))
+         return false;
+       if (this.ifNotExists != that.ifNotExists)
+         return false;
+     }
+ 
+     boolean this_present_needResult = true && this.isSetNeedResult();
+     boolean that_present_needResult = true && that.isSetNeedResult();
+     if (this_present_needResult || that_present_needResult) {
+       if (!(this_present_needResult && that_present_needResult))
+         return false;
+       if (this.needResult != that.needResult)
+         return false;
+     }
+ 
+     boolean this_present_catName = true && this.isSetCatName();
+     boolean that_present_catName = true && that.isSetCatName();
+     if (this_present_catName || that_present_catName) {
+       if (!(this_present_catName && that_present_catName))
+         return false;
+       if (!this.catName.equals(that.catName))
+         return false;
+     }
+ 
++    boolean this_present_txnId = true && this.isSetTxnId();
++    boolean that_present_txnId = true && that.isSetTxnId();
++    if (this_present_txnId || that_present_txnId) {
++      if (!(this_present_txnId && that_present_txnId))
++        return false;
++      if (this.txnId != that.txnId)
++        return false;
++    }
++
++    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
++    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
++    if (this_present_validWriteIdList || that_present_validWriteIdList) {
++      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
++        return false;
++      if (!this.validWriteIdList.equals(that.validWriteIdList))
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_dbName = true && (isSetDbName());
+     list.add(present_dbName);
+     if (present_dbName)
+       list.add(dbName);
+ 
+     boolean present_tblName = true && (isSetTblName());
+     list.add(present_tblName);
+     if (present_tblName)
+       list.add(tblName);
+ 
+     boolean present_parts = true && (isSetParts());
+     list.add(present_parts);
+     if (present_parts)
+       list.add(parts);
+ 
+     boolean present_ifNotExists = true;
+     list.add(present_ifNotExists);
+     if (present_ifNotExists)
+       list.add(ifNotExists);
+ 
+     boolean present_needResult = true && (isSetNeedResult());
+     list.add(present_needResult);
+     if (present_needResult)
+       list.add(needResult);
+ 
+     boolean present_catName = true && (isSetCatName());
+     list.add(present_catName);
+     if (present_catName)
+       list.add(catName);
+ 
++    boolean present_txnId = true && (isSetTxnId());
++    list.add(present_txnId);
++    if (present_txnId)
++      list.add(txnId);
++
++    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
++    list.add(present_validWriteIdList);
++    if (present_validWriteIdList)
++      list.add(validWriteIdList);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(AddPartitionsRequest other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetDbName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTblName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetParts()).compareTo(other.isSetParts());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetParts()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parts, other.parts);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetIfNotExists()).compareTo(other.isSetIfNotExists());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetIfNotExists()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ifNotExists, other.ifNotExists);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetNeedResult()).compareTo(other.isSetNeedResult());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetNeedResult()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.needResult, other.needResult);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCatName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTxnId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetValidWriteIdList()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("AddPartitionsRequest(");
+     boolean first = true;
+ 
+     sb.append("dbName:");
+     if (this.dbName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.dbName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("tblName:");
+     if (this.tblName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.tblName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("parts:");
+     if (this.parts == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.parts);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("ifNotExists:");
+     sb.append(this.ifNotExists);
+     first = false;
+     if (isSetNeedResult()) {
+       if (!first) sb.append(", ");
+       sb.append("needResult:");
+       sb.append(this.needResult);
+       first = false;
+     }
+     if (isSetCatName()) {
+       if (!first) sb.append(", ");
+       sb.append("catName:");
+       if (this.catName == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.catName);
+       }
+       first = false;
+     }
++    if (isSetTxnId()) {
++      if (!first) sb.append(", ");
++      sb.append("txnId:");
++      sb.append(this.txnId);
++      first = false;
++    }
++    if (isSetValidWriteIdList()) {
++      if (!first) sb.append(", ");
++      sb.append("validWriteIdList:");
++      if (this.validWriteIdList == null) {
++        sb.append("null");
++      } else {
++        sb.append(this.validWriteIdList);
++      }
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetDbName()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetTblName()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetParts()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'parts' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetIfNotExists()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'ifNotExists' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
+       // Java serialization doesn't call the default constructor, so the isset bitfield must be reset by hand.
+       __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class AddPartitionsRequestStandardSchemeFactory implements SchemeFactory {
+     public AddPartitionsRequestStandardScheme getScheme() {
+       return new AddPartitionsRequestStandardScheme();
+     }
+   }
+ 
+   private static class AddPartitionsRequestStandardScheme extends StandardScheme<AddPartitionsRequest> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsRequest struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // DB_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.dbName = iprot.readString();
+               struct.setDbNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // TBL_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.tblName = iprot.readString();
+               struct.setTblNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 3: // PARTS
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list482 = iprot.readListBegin();
+                 struct.parts = new ArrayList<Partition>(_list482.size);
+                 Partition _elem483;
+                 for (int _i484 = 0; _i484 < _list482.size; ++_i484)
+                 {
+                   _elem483 = new Partition();
+                   _elem483.read(iprot);
+                   struct.parts.add(_elem483);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setPartsIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 4: // IF_NOT_EXISTS
+             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+               struct.ifNotExists = iprot.readBool();
+               struct.setIfNotExistsIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 5: // NEED_RESULT
+             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+               struct.needResult = iprot.readBool();
+               struct.setNeedResultIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 6: // CAT_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.catName = iprot.readString();
+               struct.setCatNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 7: // TXN_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.txnId = iprot.readI64();
++              struct.setTxnIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 8: // VALID_WRITE_ID_LIST
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.validWriteIdList = iprot.readString();
++              struct.setValidWriteIdListIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsRequest struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.dbName != null) {
+         oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+         oprot.writeString(struct.dbName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.tblName != null) {
+         oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+         oprot.writeString(struct.tblName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.parts != null) {
+         oprot.writeFieldBegin(PARTS_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.parts.size()));
+           for (Partition _iter485 : struct.parts)
+           {
+             _iter485.write(oprot);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       oprot.writeFieldBegin(IF_NOT_EXISTS_FIELD_DESC);
+       oprot.writeBool(struct.ifNotExists);
+       oprot.writeFieldEnd();
+       if (struct.isSetNeedResult()) {
+         oprot.writeFieldBegin(NEED_RESULT_FIELD_DESC);
+         oprot.writeBool(struct.needResult);
+         oprot.writeFieldEnd();
+       }
+       if (struct.catName != null) {
+         if (struct.isSetCatName()) {
+           oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+           oprot.writeString(struct.catName);
+           oprot.writeFieldEnd();
+         }
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
++        oprot.writeI64(struct.txnId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.validWriteIdList != null) {
++        if (struct.isSetValidWriteIdList()) {
++          oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
++          oprot.writeString(struct.validWriteIdList);
++          oprot.writeFieldEnd();
++        }
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class AddPartitionsRequestTupleSchemeFactory implements SchemeFactory {
+     public AddPartitionsRequestTupleScheme getScheme() {
+       return new AddPartitionsRequestTupleScheme();
+     }
+   }
+ 
+   private static class AddPartitionsRequestTupleScheme extends TupleScheme<AddPartitionsRequest> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       oprot.writeString(struct.dbName);
+       oprot.writeString(struct.tblName);
+       {
+         oprot.writeI32(struct.parts.size());
+         for (Partition _iter486 : struct.parts)
+         {
+           _iter486.write(oprot);
+         }
+       }
+       oprot.writeBool(struct.ifNotExists);
+       BitSet optionals = new BitSet();
+       if (struct.isSetNeedResult()) {
+         optionals.set(0);
+       }
+       if (struct.isSetCatName()) {
+         optionals.set(1);
+       }
 -      oprot.writeBitSet(optionals, 2);
++      if (struct.isSetTxnId()) {
++        optionals.set(2);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        optionals.set(3);
++      }
++      oprot.writeBitSet(optionals, 4);
+       if (struct.isSetNeedResult()) {
+         oprot.writeBool(struct.needResult);
+       }
+       if (struct.isSetCatName()) {
+         oprot.writeString(struct.catName);
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeI64(struct.txnId);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        oprot.writeString(struct.validWriteIdList);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       struct.dbName = iprot.readString();
+       struct.setDbNameIsSet(true);
+       struct.tblName = iprot.readString();
+       struct.setTblNameIsSet(true);
+       {
+         org.apache.thrift.protocol.TList _list487 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.parts = new ArrayList<Partition>(_list487.size);
+         Partition _elem488;
+         for (int _i489 = 0; _i489 < _list487.size; ++_i489)
+         {
+           _elem488 = new Partition();
+           _elem488.read(iprot);
+           struct.parts.add(_elem488);
+         }
+       }
+       struct.setPartsIsSet(true);
+       struct.ifNotExists = iprot.readBool();
+       struct.setIfNotExistsIsSet(true);
 -      BitSet incoming = iprot.readBitSet(2);
++      BitSet incoming = iprot.readBitSet(4);
+       if (incoming.get(0)) {
+         struct.needResult = iprot.readBool();
+         struct.setNeedResultIsSet(true);
+       }
+       if (incoming.get(1)) {
+         struct.catName = iprot.readString();
+         struct.setCatNameIsSet(true);
+       }
++      if (incoming.get(2)) {
++        struct.txnId = iprot.readI64();
++        struct.setTxnIdIsSet(true);
++      }
++      if (incoming.get(3)) {
++        struct.validWriteIdList = iprot.readString();
++        struct.setValidWriteIdListIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
index 0000000,fe41b8c..3c0bf82
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
@@@ -1,0 -1,447 +1,550 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddPartitionsResult implements org.apache.thrift.TBase<AddPartitionsResult, AddPartitionsResult._Fields>, java.io.Serializable, Cloneable, Comparable<AddPartitionsResult> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsResult");
+ 
+   private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);
++  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new AddPartitionsResultStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new AddPartitionsResultTupleSchemeFactory());
+   }
+ 
+   private List<Partition> partitions; // optional
++  private boolean isStatsCompliant; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
 -    PARTITIONS((short)1, "partitions");
++    PARTITIONS((short)1, "partitions"),
++    IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it is not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // PARTITIONS
+           return PARTITIONS;
++        case 2: // IS_STATS_COMPLIANT
++          return IS_STATS_COMPLIANT;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it is not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
 -  private static final _Fields optionals[] = {_Fields.PARTITIONS};
++  private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.PARTITIONS,_Fields.IS_STATS_COMPLIANT};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
++    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsResult.class, metaDataMap);
+   }
+ 
+   public AddPartitionsResult() {
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public AddPartitionsResult(AddPartitionsResult other) {
++    __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetPartitions()) {
+       List<Partition> __this__partitions = new ArrayList<Partition>(other.partitions.size());
+       for (Partition other_element : other.partitions) {
+         __this__partitions.add(new Partition(other_element));
+       }
+       this.partitions = __this__partitions;
+     }
++    this.isStatsCompliant = other.isStatsCompliant;
+   }
+ 
+   public AddPartitionsResult deepCopy() {
+     return new AddPartitionsResult(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.partitions = null;
++    setIsStatsCompliantIsSet(false);
++    this.isStatsCompliant = false;
+   }
+ 
+   public int getPartitionsSize() {
+     return (this.partitions == null) ? 0 : this.partitions.size();
+   }
+ 
+   public java.util.Iterator<Partition> getPartitionsIterator() {
+     return (this.partitions == null) ? null : this.partitions.iterator();
+   }
+ 
+   public void addToPartitions(Partition elem) {
+     if (this.partitions == null) {
+       this.partitions = new ArrayList<Partition>();
+     }
+     this.partitions.add(elem);
+   }
+ 
+   public List<Partition> getPartitions() {
+     return this.partitions;
+   }
+ 
+   public void setPartitions(List<Partition> partitions) {
+     this.partitions = partitions;
+   }
+ 
+   public void unsetPartitions() {
+     this.partitions = null;
+   }
+ 
+   /** Returns true if field partitions is set (has been assigned a value) and false otherwise */
+   public boolean isSetPartitions() {
+     return this.partitions != null;
+   }
+ 
+   public void setPartitionsIsSet(boolean value) {
+     if (!value) {
+       this.partitions = null;
+     }
+   }
+ 
++  public boolean isIsStatsCompliant() {
++    return this.isStatsCompliant;
++  }
++
++  public void setIsStatsCompliant(boolean isStatsCompliant) {
++    this.isStatsCompliant = isStatsCompliant;
++    setIsStatsCompliantIsSet(true);
++  }
++
++  public void unsetIsStatsCompliant() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
++  public boolean isSetIsStatsCompliant() {
++    return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  public void setIsStatsCompliantIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case PARTITIONS:
+       if (value == null) {
+         unsetPartitions();
+       } else {
+         setPartitions((List<Partition>)value);
+       }
+       break;
+ 
++    case IS_STATS_COMPLIANT:
++      if (value == null) {
++        unsetIsStatsCompliant();
++      } else {
++        setIsStatsCompliant((Boolean)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case PARTITIONS:
+       return getPartitions();
+ 
++    case IS_STATS_COMPLIANT:
++      return isIsStatsCompliant();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case PARTITIONS:
+       return isSetPartitions();
++    case IS_STATS_COMPLIANT:
++      return isSetIsStatsCompliant();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof AddPartitionsResult)
+       return this.equals((AddPartitionsResult)that);
+     return false;
+   }
+ 
+   public boolean equals(AddPartitionsResult that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_partitions = true && this.isSetPartitions();
+     boolean that_present_partitions = true && that.isSetPartitions();
+     if (this_present_partitions || that_present_partitions) {
+       if (!(this_present_partitions && that_present_partitions))
+         return false;
+       if (!this.partitions.equals(that.partitions))
+         return false;
+     }
+ 
++    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
++    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
++    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
++      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
++        return false;
++      if (this.isStatsCompliant != that.isStatsCompliant)
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_partitions = true && (isSetPartitions());
+     list.add(present_partitions);
+     if (present_partitions)
+       list.add(partitions);
+ 
++    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
++    list.add(present_isStatsCompliant);
++    if (present_isStatsCompliant)
++      list.add(isStatsCompliant);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(AddPartitionsResult other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetPartitions()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIsStatsCompliant()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("AddPartitionsResult(");
+     boolean first = true;
+ 
+     if (isSetPartitions()) {
+       sb.append("partitions:");
+       if (this.partitions == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.partitions);
+       }
+       first = false;
+     }
++    if (isSetIsStatsCompliant()) {
++      if (!first) sb.append(", ");
++      sb.append("isStatsCompliant:");
++      sb.append(this.isStatsCompliant);
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     // check for sub-struct validity
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class AddPartitionsResultStandardSchemeFactory implements SchemeFactory {
+     public AddPartitionsResultStandardScheme getScheme() {
+       return new AddPartitionsResultStandardScheme();
+     }
+   }
+ 
+   private static class AddPartitionsResultStandardScheme extends StandardScheme<AddPartitionsResult> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // PARTITIONS
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list474 = iprot.readListBegin();
+                 struct.partitions = new ArrayList<Partition>(_list474.size);
+                 Partition _elem475;
+                 for (int _i476 = 0; _i476 < _list474.size; ++_i476)
+                 {
+                   _elem475 = new Partition();
+                   _elem475.read(iprot);
+                   struct.partitions.add(_elem475);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setPartitionsIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 2: // IS_STATS_COMPLIANT
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.isStatsCompliant = iprot.readBool();
++              struct.setIsStatsCompliantIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResult struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.partitions != null) {
+         if (struct.isSetPartitions()) {
+           oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
+           {
+             oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
+             for (Partition _iter477 : struct.partitions)
+             {
+               _iter477.write(oprot);
+             }
+             oprot.writeListEnd();
+           }
+           oprot.writeFieldEnd();
+         }
+       }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
++        oprot.writeBool(struct.isStatsCompliant);
++        oprot.writeFieldEnd();
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class AddPartitionsResultTupleSchemeFactory implements SchemeFactory {
+     public AddPartitionsResultTupleScheme getScheme() {
+       return new AddPartitionsResultTupleScheme();
+     }
+   }
+ 
+   private static class AddPartitionsResultTupleScheme extends TupleScheme<AddPartitionsResult> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       BitSet optionals = new BitSet();
+       if (struct.isSetPartitions()) {
+         optionals.set(0);
+       }
 -      oprot.writeBitSet(optionals, 1);
++      if (struct.isSetIsStatsCompliant()) {
++        optionals.set(1);
++      }
++      oprot.writeBitSet(optionals, 2);
+       if (struct.isSetPartitions()) {
+         {
+           oprot.writeI32(struct.partitions.size());
+           for (Partition _iter478 : struct.partitions)
+           {
+             _iter478.write(oprot);
+           }
+         }
+       }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeBool(struct.isStatsCompliant);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
 -      BitSet incoming = iprot.readBitSet(1);
++      BitSet incoming = iprot.readBitSet(2);
+       if (incoming.get(0)) {
+         {
+           org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+           struct.partitions = new ArrayList<Partition>(_list479.size);
+           Partition _elem480;
+           for (int _i481 = 0; _i481 < _list479.size; ++_i481)
+           {
+             _elem480 = new Partition();
+             _elem480.read(iprot);
+             struct.partitions.add(_elem480);
+           }
+         }
+         struct.setPartitionsIsSet(true);
+       }
++      if (incoming.get(1)) {
++        struct.isStatsCompliant = iprot.readBool();
++        struct.setIsStatsCompliantIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
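
The diff above follows the standard Thrift field-evolution pattern: the StandardScheme writes the new optional field only when its isset bit is on, and the TupleScheme widens its optionals BitSet from 1 to 2 so both ends account for the extra slot. Below is a minimal round-trip sketch of the new field; the demo class name is hypothetical, and it assumes only the generated AddPartitionsResult above plus libthrift 0.9.3 on the classpath (TMemoryBuffer is Thrift's in-memory transport).

import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TMemoryBuffer;
import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;

public class AddPartitionsResultRoundTrip {
  public static void main(String[] args) throws Exception {
    AddPartitionsResult result = new AddPartitionsResult();
    result.setIsStatsCompliant(true);            // assigns field 2 and flips its isset bit

    TMemoryBuffer buffer = new TMemoryBuffer(256);
    // standard scheme: the field is emitted only because isSetIsStatsCompliant() is true
    result.write(new TCompactProtocol(buffer));

    AddPartitionsResult copy = new AddPartitionsResult();
    copy.read(new TCompactProtocol(buffer));
    System.out.println(copy.isSetIsStatsCompliant() && copy.isIsStatsCompliant()); // true
  }
}

A reader generated from the pre-change schema would fall into the default: arm of the standard scheme's switch and skip the unrecognized BOOL field, so the addition stays wire-compatible on the binary and compact protocols; the tuple scheme, by contrast, requires both ends to agree on the BitSet width.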

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
index 0000000,fff212d..87b8fea
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
@@@ -1,0 -1,542 +1,645 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AggrStats implements org.apache.thrift.TBase<AggrStats, AggrStats._Fields>, java.io.Serializable, Cloneable, Comparable<AggrStats> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AggrStats");
+ 
+   private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);
+   private static final org.apache.thrift.protocol.TField PARTS_FOUND_FIELD_DESC = new org.apache.thrift.protocol.TField("partsFound", org.apache.thrift.protocol.TType.I64, (short)2);
++  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)3);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new AggrStatsStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new AggrStatsTupleSchemeFactory());
+   }
+ 
+   private List<ColumnStatisticsObj> colStats; // required
+   private long partsFound; // required
++  private boolean isStatsCompliant; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     COL_STATS((short)1, "colStats"),
 -    PARTS_FOUND((short)2, "partsFound");
++    PARTS_FOUND((short)2, "partsFound"),
++    IS_STATS_COMPLIANT((short)3, "isStatsCompliant");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // COL_STATS
+           return COL_STATS;
+         case 2: // PARTS_FOUND
+           return PARTS_FOUND;
++        case 3: // IS_STATS_COMPLIANT
++          return IS_STATS_COMPLIANT;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
+   private static final int __PARTSFOUND_ISSET_ID = 0;
++  private static final int __ISSTATSCOMPLIANT_ISSET_ID = 1;
+   private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.COL_STATS, new org.apache.thrift.meta_data.FieldMetaData("colStats", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
+     tmpMap.put(_Fields.PARTS_FOUND, new org.apache.thrift.meta_data.FieldMetaData("partsFound", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AggrStats.class, metaDataMap);
+   }
+ 
+   public AggrStats() {
+   }
+ 
+   public AggrStats(
+     List<ColumnStatisticsObj> colStats,
+     long partsFound)
+   {
+     this();
+     this.colStats = colStats;
+     this.partsFound = partsFound;
+     setPartsFoundIsSet(true);
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public AggrStats(AggrStats other) {
+     __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetColStats()) {
+       List<ColumnStatisticsObj> __this__colStats = new ArrayList<ColumnStatisticsObj>(other.colStats.size());
+       for (ColumnStatisticsObj other_element : other.colStats) {
+         __this__colStats.add(new ColumnStatisticsObj(other_element));
+       }
+       this.colStats = __this__colStats;
+     }
+     this.partsFound = other.partsFound;
++    this.isStatsCompliant = other.isStatsCompliant;
+   }
+ 
+   public AggrStats deepCopy() {
+     return new AggrStats(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.colStats = null;
+     setPartsFoundIsSet(false);
+     this.partsFound = 0;
++    setIsStatsCompliantIsSet(false);
++    this.isStatsCompliant = false;
+   }
+ 
+   public int getColStatsSize() {
+     return (this.colStats == null) ? 0 : this.colStats.size();
+   }
+ 
+   public java.util.Iterator<ColumnStatisticsObj> getColStatsIterator() {
+     return (this.colStats == null) ? null : this.colStats.iterator();
+   }
+ 
+   public void addToColStats(ColumnStatisticsObj elem) {
+     if (this.colStats == null) {
+       this.colStats = new ArrayList<ColumnStatisticsObj>();
+     }
+     this.colStats.add(elem);
+   }
+ 
+   public List<ColumnStatisticsObj> getColStats() {
+     return this.colStats;
+   }
+ 
+   public void setColStats(List<ColumnStatisticsObj> colStats) {
+     this.colStats = colStats;
+   }
+ 
+   public void unsetColStats() {
+     this.colStats = null;
+   }
+ 
+   /** Returns true if field colStats is set (has been assigned a value) and false otherwise */
+   public boolean isSetColStats() {
+     return this.colStats != null;
+   }
+ 
+   public void setColStatsIsSet(boolean value) {
+     if (!value) {
+       this.colStats = null;
+     }
+   }
+ 
+   public long getPartsFound() {
+     return this.partsFound;
+   }
+ 
+   public void setPartsFound(long partsFound) {
+     this.partsFound = partsFound;
+     setPartsFoundIsSet(true);
+   }
+ 
+   public void unsetPartsFound() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PARTSFOUND_ISSET_ID);
+   }
+ 
+   /** Returns true if field partsFound is set (has been assigned a value) and false otherwise */
+   public boolean isSetPartsFound() {
+     return EncodingUtils.testBit(__isset_bitfield, __PARTSFOUND_ISSET_ID);
+   }
+ 
+   public void setPartsFoundIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARTSFOUND_ISSET_ID, value);
+   }
+ 
++  public boolean isIsStatsCompliant() {
++    return this.isStatsCompliant;
++  }
++
++  public void setIsStatsCompliant(boolean isStatsCompliant) {
++    this.isStatsCompliant = isStatsCompliant;
++    setIsStatsCompliantIsSet(true);
++  }
++
++  public void unsetIsStatsCompliant() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
++  public boolean isSetIsStatsCompliant() {
++    return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  public void setIsStatsCompliantIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case COL_STATS:
+       if (value == null) {
+         unsetColStats();
+       } else {
+         setColStats((List<ColumnStatisticsObj>)value);
+       }
+       break;
+ 
+     case PARTS_FOUND:
+       if (value == null) {
+         unsetPartsFound();
+       } else {
+         setPartsFound((Long)value);
+       }
+       break;
+ 
++    case IS_STATS_COMPLIANT:
++      if (value == null) {
++        unsetIsStatsCompliant();
++      } else {
++        setIsStatsCompliant((Boolean)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case COL_STATS:
+       return getColStats();
+ 
+     case PARTS_FOUND:
+       return getPartsFound();
+ 
++    case IS_STATS_COMPLIANT:
++      return isIsStatsCompliant();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case COL_STATS:
+       return isSetColStats();
+     case PARTS_FOUND:
+       return isSetPartsFound();
++    case IS_STATS_COMPLIANT:
++      return isSetIsStatsCompliant();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof AggrStats)
+       return this.equals((AggrStats)that);
+     return false;
+   }
+ 
+   public boolean equals(AggrStats that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_colStats = true && this.isSetColStats();
+     boolean that_present_colStats = true && that.isSetColStats();
+     if (this_present_colStats || that_present_colStats) {
+       if (!(this_present_colStats && that_present_colStats))
+         return false;
+       if (!this.colStats.equals(that.colStats))
+         return false;
+     }
+ 
+     boolean this_present_partsFound = true;
+     boolean that_present_partsFound = true;
+     if (this_present_partsFound || that_present_partsFound) {
+       if (!(this_present_partsFound && that_present_partsFound))
+         return false;
+       if (this.partsFound != that.partsFound)
+         return false;
+     }
+ 
++    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
++    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
++    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
++      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
++        return false;
++      if (this.isStatsCompliant != that.isStatsCompliant)
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_colStats = true && (isSetColStats());
+     list.add(present_colStats);
+     if (present_colStats)
+       list.add(colStats);
+ 
+     boolean present_partsFound = true;
+     list.add(present_partsFound);
+     if (present_partsFound)
+       list.add(partsFound);
+ 
++    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
++    list.add(present_isStatsCompliant);
++    if (present_isStatsCompliant)
++      list.add(isStatsCompliant);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(AggrStats other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetColStats()).compareTo(other.isSetColStats());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetColStats()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colStats, other.colStats);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetPartsFound()).compareTo(other.isSetPartsFound());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetPartsFound()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partsFound, other.partsFound);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIsStatsCompliant()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("AggrStats(");
+     boolean first = true;
+ 
+     sb.append("colStats:");
+     if (this.colStats == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.colStats);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("partsFound:");
+     sb.append(this.partsFound);
+     first = false;
++    if (isSetIsStatsCompliant()) {
++      if (!first) sb.append(", ");
++      sb.append("isStatsCompliant:");
++      sb.append(this.isStatsCompliant);
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetColStats()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'colStats' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetPartsFound()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'partsFound' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
+       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+       __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class AggrStatsStandardSchemeFactory implements SchemeFactory {
+     public AggrStatsStandardScheme getScheme() {
+       return new AggrStatsStandardScheme();
+     }
+   }
+ 
+   private static class AggrStatsStandardScheme extends StandardScheme<AggrStats> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, AggrStats struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // COL_STATS
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list276 = iprot.readListBegin();
+                 struct.colStats = new ArrayList<ColumnStatisticsObj>(_list276.size);
+                 ColumnStatisticsObj _elem277;
+                 for (int _i278 = 0; _i278 < _list276.size; ++_i278)
+                 {
+                   _elem277 = new ColumnStatisticsObj();
+                   _elem277.read(iprot);
+                   struct.colStats.add(_elem277);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setColStatsIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // PARTS_FOUND
+             if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+               struct.partsFound = iprot.readI64();
+               struct.setPartsFoundIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 3: // IS_STATS_COMPLIANT
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.isStatsCompliant = iprot.readBool();
++              struct.setIsStatsCompliantIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, AggrStats struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.colStats != null) {
+         oprot.writeFieldBegin(COL_STATS_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size()));
+           for (ColumnStatisticsObj _iter279 : struct.colStats)
+           {
+             _iter279.write(oprot);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       oprot.writeFieldBegin(PARTS_FOUND_FIELD_DESC);
+       oprot.writeI64(struct.partsFound);
+       oprot.writeFieldEnd();
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
++        oprot.writeBool(struct.isStatsCompliant);
++        oprot.writeFieldEnd();
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class AggrStatsTupleSchemeFactory implements SchemeFactory {
+     public AggrStatsTupleScheme getScheme() {
+       return new AggrStatsTupleScheme();
+     }
+   }
+ 
+   private static class AggrStatsTupleScheme extends TupleScheme<AggrStats> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       {
+         oprot.writeI32(struct.colStats.size());
+         for (ColumnStatisticsObj _iter280 : struct.colStats)
+         {
+           _iter280.write(oprot);
+         }
+       }
+       oprot.writeI64(struct.partsFound);
++      BitSet optionals = new BitSet();
++      if (struct.isSetIsStatsCompliant()) {
++        optionals.set(0);
++      }
++      oprot.writeBitSet(optionals, 1);
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeBool(struct.isStatsCompliant);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       {
+         org.apache.thrift.protocol.TList _list281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.colStats = new ArrayList<ColumnStatisticsObj>(_list281.size);
+         ColumnStatisticsObj _elem282;
+         for (int _i283 = 0; _i283 < _list281.size; ++_i283)
+         {
+           _elem282 = new ColumnStatisticsObj();
+           _elem282.read(iprot);
+           struct.colStats.add(_elem282);
+         }
+       }
+       struct.setColStatsIsSet(true);
+       struct.partsFound = iprot.readI64();
+       struct.setPartsFoundIsSet(true);
++      BitSet incoming = iprot.readBitSet(1);
++      if (incoming.get(0)) {
++        struct.isStatsCompliant = iprot.readBool();
++        struct.setIsStatsCompliantIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
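
AggrStats follows the same pattern, with one wrinkle worth noting: colStats and partsFound remain REQUIRED while isStatsCompliant is OPTIONAL, so validate() never demands the new field and an unset boolean reads back as its primitive default. A short illustrative sketch follows; the demo class name is hypothetical, and only the generated AggrStats API above is assumed.

import java.util.ArrayList;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

public class AggrStatsOptionalFieldDemo {
  public static void main(String[] args) throws Exception {
    AggrStats stats = new AggrStats(new ArrayList<ColumnStatisticsObj>(), 0L);
    stats.validate();                                  // passes: both REQUIRED fields are set

    System.out.println(stats.isSetIsStatsCompliant()); // false: the optional field starts unset

    stats.setIsStatsCompliant(true);                   // assigns the value and flips the isset bit
    System.out.println(stats.isIsStatsCompliant());    // true
  }
}

Because an unset isStatsCompliant still returns false from isIsStatsCompliant(), callers should gate on isSetIsStatsCompliant() before treating the value as meaningful.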


[52/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
index 0000000,54bf3d7..9b9b101
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/client/TestAlterPartitions.java
@@@ -1,0 -1,1117 +1,1130 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.client;
+ 
++import java.net.ProtocolException;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.List;
+ 
+ import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.minihms.AbstractMetaStoreService;
+ import org.apache.thrift.TException;
++import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.transport.TTransportException;
+ 
+ import com.google.common.collect.Lists;
+ 
+ import org.junit.After;
+ import org.junit.AfterClass;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.junit.runner.RunWith;
+ import org.junit.runners.Parameterized;
+ 
+ import static java.util.stream.Collectors.joining;
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.junit.Assert.assertEquals;
+ import static org.junit.Assert.assertFalse;
+ import static org.junit.Assert.assertNotEquals;
+ import static org.junit.Assert.assertTrue;
+ import static org.junit.Assert.fail;
+ 
+ /**
+  * API tests for HMS client's alterPartitions methods.
+  */
+ @RunWith(Parameterized.class)
+ @Category(MetastoreCheckinTest.class)
+ public class TestAlterPartitions extends MetaStoreClientTest {
+   private static final int NEW_CREATE_TIME = 123456789;
+   private AbstractMetaStoreService metaStore;
+   private IMetaStoreClient client;
+ 
+   private static final String DB_NAME = "testpartdb";
+   private static final String TABLE_NAME = "testparttable";
+   private static final List<String> PARTCOL_SCHEMA = Lists.newArrayList("yyyy", "mm", "dd");
+ 
+   public TestAlterPartitions(String name, AbstractMetaStoreService metaStore) {
+     this.metaStore = metaStore;
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     // Get new client
+     client = metaStore.getClient();
+ 
+     // Clean up the database
+     client.dropDatabase(DB_NAME, true, true, true);
+     metaStore.cleanWarehouseDirs();
+     createDB(DB_NAME);
+   }
+ 
+   @After
+   public void tearDown() throws Exception {
+     try {
+       if (client != null) {
+         try {
+           client.close();
+         } catch (Exception e) {
+           // HIVE-19729: Swallow the exceptions based on the discussion in the Jira
+         }
+       }
+     } finally {
+       client = null;
+     }
+   }
+ 
+   private void createDB(String dbName) throws TException {
+     new DatabaseBuilder().
+             setName(dbName).
+             create(client, metaStore.getConf());
+   }
+ 
+   private Table createTestTable(IMetaStoreClient client, String dbName, String tableName,
+                                        List<String> partCols, boolean setPartitionLevelPrivilages)
+           throws Exception {
+     TableBuilder builder = new TableBuilder()
+             .setDbName(dbName)
+             .setTableName(tableName)
+             .addCol("id", "int")
+             .addCol("name", "string");
+ 
+     partCols.forEach(col -> builder.addPartCol(col, "string"));
+     Table table = builder.build(metaStore.getConf());
+ 
+     if (setPartitionLevelPrivilages) {
+       table.putToParameters("PARTITION_LEVEL_PRIVILEGE", "true");
+     }
+ 
+     client.createTable(table);
+     return table;
+   }
+ 
+   private void addPartition(IMetaStoreClient client, Table table, List<String> values)
+           throws TException {
+     PartitionBuilder partitionBuilder = new PartitionBuilder().inTable(table);
+     values.forEach(val -> partitionBuilder.addValue(val));
+     client.add_partition(partitionBuilder.build(metaStore.getConf()));
+   }
+ 
+   private List<List<String>> createTable4PartColsParts(IMetaStoreClient client) throws
+           Exception {
+     Table t = createTestTable(client, DB_NAME, TABLE_NAME, PARTCOL_SCHEMA, false);
+     List<List<String>> testValues = Lists.newArrayList(
+             Lists.newArrayList("1999", "01", "02"),
+             Lists.newArrayList("2009", "02", "10"),
+             Lists.newArrayList("2017", "10", "26"),
+             Lists.newArrayList("2017", "11", "27"));
+ 
+     for(List<String> vals : testValues){
+       addPartition(client, t, vals);
+     }
+ 
+     return testValues;
+   }
+ 
+   private static void assertPartitionsHaveCorrectValues(List<Partition> partitions,
+                                     List<List<String>> testValues) throws Exception {
+     assertEquals(testValues.size(), partitions.size());
+     for (int i = 0; i < partitions.size(); ++i) {
+       assertEquals(testValues.get(i), partitions.get(i).getValues());
+     }
+   }
+ 
+   private static void makeTestChangesOnPartition(Partition partition) {
+     partition.getParameters().put("hmsTestParam001", "testValue001");
+     partition.setCreateTime(NEW_CREATE_TIME);
+     partition.setLastAccessTime(NEW_CREATE_TIME);
+     partition.getSd().setLocation(partition.getSd().getLocation()+"/hh=01");
+     partition.getSd().getCols().add(new FieldSchema("newcol", "string", ""));
+   }
+ 
+   private void assertPartitionUnchanged(Partition partition, List<String> testValues,
+                                                List<String> partCols) throws MetaException {
+     assertFalse(partition.getParameters().containsKey("hmsTestParam001"));
+ 
+     List<String> expectedKVPairs = new ArrayList<>();
+     for (int i = 0; i < partCols.size(); ++i) {
+       expectedKVPairs.add(partCols.get(i) + "=" + testValues.get(i));
+     }
+     String partPath = expectedKVPairs.stream().collect(joining("/"));
+     assertTrue(partition.getSd().getLocation().equals(metaStore.getWarehouseRoot()
+         + "/testpartdb.db/testparttable/" + partPath));
+     assertNotEquals(NEW_CREATE_TIME, partition.getCreateTime());
+     assertNotEquals(NEW_CREATE_TIME, partition.getLastAccessTime());
+     assertEquals(2, partition.getSd().getCols().size());
+   }
+ 
+   private void assertPartitionChanged(Partition partition, List<String> testValues,
+                                       List<String> partCols) throws MetaException {
+     assertEquals("testValue001", partition.getParameters().get("hmsTestParam001"));
+ 
+     List<String> expectedKVPairs = new ArrayList<>();
+     for (int i = 0; i < partCols.size(); ++i) {
+       expectedKVPairs.add(partCols.get(i) + "=" + testValues.get(i));
+     }
+     String partPath = expectedKVPairs.stream().collect(joining("/"));
+     assertTrue(partition.getSd().getLocation().equals(metaStore.getWarehouseRoot()
+         + "/testpartdb.db/testparttable/" + partPath + "/hh=01"));
+     assertEquals(NEW_CREATE_TIME, partition.getCreateTime());
+     assertEquals(NEW_CREATE_TIME, partition.getLastAccessTime());
+     assertEquals(3, partition.getSd().getCols().size());
+   }
+ 
+ 
+ 
+   /**
+    * Testing alter_partition(String,String,Partition) ->
+    *         alter_partition_with_environment_context(String,String,Partition,null).
+    */
+   @Test
+   public void testAlterPartition() throws Exception {
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition oldPart = oldParts.get(3);
+ 
+     assertPartitionUnchanged(oldPart, testValues.get(3), PARTCOL_SCHEMA);
+     makeTestChangesOnPartition(oldPart);
+ 
+     client.alter_partition(DB_NAME, TABLE_NAME, oldPart);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition newPart = newParts.get(3);
+     assertPartitionChanged(newPart, testValues.get(3), PARTCOL_SCHEMA);
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+ 
+   }
+ 
+   @Test
+   public void otherCatalog() throws TException {
+     String catName = "alter_partition_catalog";
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation(MetaStoreTestUtils.getTestWarehouseDir(catName))
+         .build();
+     client.createCatalog(cat);
+ 
+     String dbName = "alter_partition_database_in_other_catalog";
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setCatalogName(catName)
+         .create(client, metaStore.getConf());
+ 
+     String tableName = "table_in_other_catalog";
+     Table table = new TableBuilder()
+         .inDb(db)
+         .setTableName(tableName)
+         .addCol("id", "int")
+         .addCol("name", "string")
+         .addPartCol("partcol", "string")
+         .create(client, metaStore.getConf());
+ 
+     Partition[] parts = new Partition[5];
+     for (int i = 0; i < 5; i++) {
+       parts[i] = new PartitionBuilder()
+           .inTable(table)
+           .addValue("a" + i)
+           .setLocation(MetaStoreTestUtils.getTestWarehouseDir("b" + i))
+           .build(metaStore.getConf());
+     }
+     client.add_partitions(Arrays.asList(parts));
+ 
+     Partition newPart =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
+     newPart.getParameters().put("test_key", "test_value");
+     client.alter_partition(catName, dbName, tableName, newPart);
+ 
+     Partition fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a0"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+ 
+     newPart =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
+     newPart.setLastAccessTime(3);
+     Partition newPart1 =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
+     newPart1.getSd().setLocation(MetaStoreTestUtils.getTestWarehouseDir("somewhere"));
+     client.alter_partitions(catName, dbName, tableName, Arrays.asList(newPart, newPart1));
+     fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a1"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertEquals(3L, fetched.getLastAccessTime());
+     fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a2"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere"));
+ 
+     newPart =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a4"));
+     newPart.getParameters().put("test_key", "test_value");
+     EnvironmentContext ec = new EnvironmentContext();
+     ec.setProperties(Collections.singletonMap("a", "b"));
+     client.alter_partition(catName, dbName, tableName, newPart, ec);
+     fetched =
+         client.getPartition(catName, dbName, tableName, Collections.singletonList("a4"));
+     Assert.assertEquals(catName, fetched.getCatName());
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+ 
+ 
+     client.dropDatabase(catName, dbName, true, true, true);
+     client.dropCatalog(catName);
+   }
+ 
+   @SuppressWarnings("deprecation")
+   @Test
+   public void deprecatedCalls() throws TException {
+     String tableName = "deprecated_table";
+     Table table = new TableBuilder()
+         .setTableName(tableName)
+         .addCol("id", "int")
+         .addCol("name", "string")
+         .addPartCol("partcol", "string")
+         .create(client, metaStore.getConf());
+ 
+     Partition[] parts = new Partition[5];
+     for (int i = 0; i < 5; i++) {
+       parts[i] = new PartitionBuilder()
+           .inTable(table)
+           .addValue("a" + i)
+           .setLocation(MetaStoreTestUtils.getTestWarehouseDir("a" + i))
+           .build(metaStore.getConf());
+     }
+     client.add_partitions(Arrays.asList(parts));
+ 
+     Partition newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0"));
+     newPart.getParameters().put("test_key", "test_value");
+     client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart);
+ 
+     Partition fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a0"));
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+ 
+     newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1"));
+     newPart.setLastAccessTime(3);
+     Partition newPart1 =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2"));
+     newPart1.getSd().setLocation("somewhere");
+     client.alter_partitions(DEFAULT_DATABASE_NAME, tableName, Arrays.asList(newPart, newPart1));
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a1"));
+     Assert.assertEquals(3L, fetched.getLastAccessTime());
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a2"));
+     Assert.assertTrue(fetched.getSd().getLocation().contains("somewhere"));
+ 
+     newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3"));
+     newPart.setValues(Collections.singletonList("b3"));
+     client.renamePartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a3"), newPart);
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("b3"));
+     Assert.assertEquals(1, fetched.getValuesSize());
+     Assert.assertEquals("b3", fetched.getValues().get(0));
+ 
+     newPart =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4"));
+     newPart.getParameters().put("test_key", "test_value");
+     EnvironmentContext ec = new EnvironmentContext();
+     ec.setProperties(Collections.singletonMap("a", "b"));
+     client.alter_partition(DEFAULT_DATABASE_NAME, tableName, newPart, ec);
+     fetched =
+         client.getPartition(DEFAULT_DATABASE_NAME, tableName, Collections.singletonList("a4"));
+     Assert.assertEquals("test_value", fetched.getParameters().get("test_key"));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionUnknownPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionBogusCatalogName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition("nosuch", DB_NAME, TABLE_NAME, partitions.get(3));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition("", TABLE_NAME, partitions.get(3));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(null, TABLE_NAME, partitions.get(3));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(DB_NAME, "", partitions.get(3));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(DB_NAME, null, partitions.get(3));
+   }
+ 
+   @Test
+   public void testAlterPartitionNullPartition() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+       client.alter_partition(DB_NAME, TABLE_NAME, null);
+       fail("Should have thrown exception");
+     } catch (NullPointerException | TTransportException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setDbName(DB_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setTableName(TABLE_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partition(DB_NAME, TABLE_NAME, partition);
+   }
+ 
+ 
+   /**
+    * Testing alter_partition(String,String,Partition,EnvironmentContext) ->
+    *         alter_partition_with_environment_context(String,String,Partition,EnvironmentContext).
+    */
+   @Test
+   public void testAlterPartitionWithEnvironmentCtx() throws Exception {
+     EnvironmentContext context = new EnvironmentContext();
+     context.setProperties(new HashMap<String, String>(){
+       {
+         put("TestKey", "TestValue");
+       }
+     });
+ 
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = oldParts.get(3);
+ 
+     assertPartitionUnchanged(partition, testValues.get(3), PARTCOL_SCHEMA);
+     makeTestChangesOnPartition(partition);
+ 
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, context);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     partition = newParts.get(3);
+     assertPartitionChanged(partition, testValues.get(3), PARTCOL_SCHEMA);
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+ 
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, null);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxUnknownPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     client.alter_partition(DB_NAME, TABLE_NAME, part, new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition("", TABLE_NAME, partitions.get(3), new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(null, TABLE_NAME, partitions.get(3), new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(DB_NAME, "", partitions.get(3), new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     client.alter_partition(DB_NAME, null, partitions.get(3), new EnvironmentContext());
+   }
+ 
+   @Test
+   public void testAlterPartitionWithEnvironmentCtxNullPartition() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
+       client.alter_partition(DB_NAME, TABLE_NAME, null, new EnvironmentContext());
+       fail("Should have thrown exception");
+     } catch (NullPointerException | TTransportException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setDbName(DB_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionWithEnvironmentCtxChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setTableName(TABLE_NAME+"_changed");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionWithEnvironmentCtxChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition partition = partitions.get(3);
+     partition.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, new EnvironmentContext());
+   }
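+ 
+   // Illustrative sketch, not exercised by these tests: the EnvironmentContext
+   // is a free-form property map that callers can use to pass hints along with
+   // an alter. The "DO_NOT_UPDATE_STATS" key below is an assumed example (see
+   // StatsSetupConst); any key/value pair can be sent.
+   private void alterPartitionSkippingStats(Partition partition) throws Exception {
+     EnvironmentContext ctx = new EnvironmentContext();
+     ctx.putToProperties("DO_NOT_UPDATE_STATS", "true");
+     client.alter_partition(DB_NAME, TABLE_NAME, partition, ctx);
+   }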
+ 
+ 
+ 
+   /**
+    * Testing
+    *    alter_partitions(String,String,List(Partition)) ->
+    *    alter_partitions_with_environment_context(String,String,List(Partition),null).
+    */
+   @Test
+   public void testAlterPartitions() throws Exception {
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionUnchanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     oldParts.forEach(p -> makeTestChangesOnPartition(p));
+ 
+     client.alter_partitions(DB_NAME, TABLE_NAME, oldParts);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+   }
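+ 
+   // Illustrative sketch, not exercised by these tests: the bulk-alter pattern
+   // used in testAlterPartitions above, reduced to its essentials. Fetch the
+   // partitions, mutate them in place, then persist all changes in one call.
+   private void bumpLastAccessTimes(int newTime) throws Exception {
+     List<Partition> parts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
+     parts.forEach(p -> p.setLastAccessTime(newTime));
+     client.alter_partitions(DB_NAME, TABLE_NAME, parts);
+   }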
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsEmptyPartitionList() throws Exception {
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList());
+   }
+ 
+   @Test
+   public void testAlterPartitionsUnknownPartition() throws Exception {
+     Partition part1 = null;
+     try {
+       createTable4PartColsParts(client);
+       Table t = client.getTable(DB_NAME, TABLE_NAME);
+       PartitionBuilder builder = new PartitionBuilder();
+       Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+       part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
+       makeTestChangesOnPartition(part1);
+       client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
+       fail("Should have thrown InvalidOperationException");
+     } catch (InvalidOperationException e) {
+       part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1).get(0);
+       assertPartitionUnchanged(part1, part1.getValues(), PARTCOL_SCHEMA);
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1));
+   }
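+ 
+   // Illustrative sketch, not exercised by these tests: a valid Partition for
+   // this table must carry one value per partition column (three here), which
+   // is exactly what the two tests above violate.
+   private Partition buildCompletePartition(Table t) throws Exception {
+     return new PartitionBuilder().inTable(t)
+         .addValue("2018").addValue("07").addValue("12")
+         .build(metaStore.getConf());
+   }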
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsBogusCatalogName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions("", TABLE_NAME, Lists.newArrayList(part));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part));
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
++    try {
++      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part));
++      Assert.fail("Should have thrown exception");
++    } catch (TProtocolException | MetaException e) {
++      // By design: embedded and remote HMS deployments surface this differently.
++    }
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsNullPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, null));
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsNullPartitions() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(null, null));
+   }
+ 
+   @Test
+   public void testAlterPartitionsNullPartitionList() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+       client.alter_partitions(DB_NAME, TABLE_NAME, null);
+       fail("Should have thrown exception");
 -    } catch (NullPointerException | TTransportException e) {
++    } catch (NullPointerException | TTransportException | TProtocolException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setDbName(DB_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p));
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setTableName(TABLE_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p));
+   }
+ 
+ 
+ 
+   /**
+    * Testing
+    *    alter_partitions(String,String,List(Partition),EnvironmentContext) ->
+    *    alter_partitions_with_environment_context(String,String,List(Partition),EnvironmentContext).
+    */
+   @Test
+   public void testAlterPartitionsWithEnvironmentCtx() throws Exception {
+     EnvironmentContext context = new EnvironmentContext();
+     context.putToProperties("TestKey", "TestValue");
+ 
+     List<List<String>> testValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionUnchanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     oldParts.forEach(p -> makeTestChangesOnPartition(p));
+ 
+     client.alter_partitions(DB_NAME, TABLE_NAME, oldParts, context);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+     assertPartitionsHaveCorrectValues(newParts, testValues);
+ 
+     client.alter_partitions(DB_NAME, TABLE_NAME, newParts, new EnvironmentContext());
 -    client.alter_partitions(DB_NAME, TABLE_NAME, newParts, null);
++    client.alter_partitions(DB_NAME, TABLE_NAME, newParts);
+ 
+     for (int i = 0; i < testValues.size(); ++i) {
+       assertPartitionChanged(oldParts.get(i), testValues.get(i), PARTCOL_SCHEMA);
+     }
+   }
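+ 
+   // The two overloads exercised at the end of the test above relate roughly
+   // like this on the client side (a sketch under assumed method shapes; the
+   // actual HiveMetaStoreClient code may differ):
+   //
+   //   public void alter_partitions(String dbName, String tblName,
+   //       List<Partition> newParts) throws TException {
+   //     alter_partitions(dbName, tblName, newParts, null);  // no context
+   //   }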
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxEmptyPartitionList() throws Exception {
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(), new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxUnknownPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("1111").addValue("11").addValue("11").build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxIncompletePartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).addValue("2017").build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxMissingPartitionVals() throws Exception {
+     createTable4PartColsParts(client);
+     Table t = client.getTable(DB_NAME, TABLE_NAME);
+     PartitionBuilder builder = new PartitionBuilder();
+     Partition part = builder.inTable(t).build(metaStore.getConf());
+     Partition part1 = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, part1),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxBogusCatalogName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
++    client.alter_partitions("nosuch", DB_NAME, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext(),
++        -1, null, -1);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNoDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions("", TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNullDbName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(null, TABLE_NAME, Lists.newArrayList(part), new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNoTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, "", Lists.newArrayList(part), new EnvironmentContext());
+   }
+ 
 -  @Test(expected = MetaException.class)
++  @Test
+   public void testAlterPartitionsWithEnvironmentCtxNullTblName() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
 -    client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
++    try {
++      client.alter_partitions(DB_NAME, null, Lists.newArrayList(part), new EnvironmentContext());
++      Assert.fail("Should have thrown exception");
++    } catch (MetaException | TProtocolException ex) {
++      // By design: embedded and remote HMS deployments surface this differently.
++    }
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNullPartition() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(part, null),
+             new EnvironmentContext());
+   }
+ 
+   @Test(expected = NullPointerException.class)
+   public void testAlterPartitionsWithEnvironmentCtxNullPartitions() throws Exception {
+     createTable4PartColsParts(client);
+     Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(null, null),
+             new EnvironmentContext());
+   }
+ 
+   @Test
+   public void testAlterPartitionsWithEnvironmentCtxNullPartitionList() throws Exception {
+     try {
+       createTable4PartColsParts(client);
+       Partition part = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1).get(0);
+       client.alter_partitions(DB_NAME, TABLE_NAME, null, new EnvironmentContext());
+       fail("Should have thrown exception");
 -    } catch (NullPointerException | TTransportException e) {
++    } catch (NullPointerException | TTransportException | TProtocolException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxChangeDbName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setDbName(DB_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p), new EnvironmentContext());
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testAlterPartitionsWithEnvironmentCtxChangeTableName() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setTableName(TABLE_NAME+"_changed");
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p), new EnvironmentContext());
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testAlterPartitionsWithEnvironmentCtxChangeValues() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> partitions = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     Partition p = partitions.get(3);
+     p.setValues(Lists.newArrayList("1", "2", "3"));
+     client.alter_partitions(DB_NAME, TABLE_NAME, Lists.newArrayList(p), new EnvironmentContext());
+   }
+ 
+   /**
+    * Testing
+    *    renamePartition(String,String,List(String),Partition) ->
+    *    rename_partition(String,String,List(String),Partition).
+    */
+   @Test
+   public void testRenamePartition() throws Exception {
+ 
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<List<String>> newValues = new ArrayList<>();
+ 
+     List<String> newVal = Lists.newArrayList("2018", "01", "16");
+     newValues.addAll(oldValues.subList(0, 3));
+     newValues.add(newVal);
+ 
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(newVal);
+     makeTestChangesOnPartition(partToRename);
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+ 
+     List<Partition> newParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+     assertPartitionsHaveCorrectValues(newParts, newValues);
+ 
+ 
+     // Assert that other partition parameters can also be changed, but not the location
+     assertFalse(newParts.get(3).getSd().getLocation().endsWith("hh=01"));
+     assertEquals(3, newParts.get(3).getSd().getCols().size());
+     assertEquals("testValue001", newParts.get(3).getParameters().get("hmsTestParam001"));
+     assertEquals(NEW_CREATE_TIME, newParts.get(3).getCreateTime());
+     assertEquals(NEW_CREATE_TIME, newParts.get(3).getLastAccessTime());
+ 
+ 
+ 
+     assertTrue(client.listPartitions(DB_NAME, TABLE_NAME, oldValues.get(3), (short)-1).isEmpty());
+   }
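+ 
+   // Illustrative sketch, not exercised by these tests: a rename is expressed
+   // by handing renamePartition the old value list plus a Partition object
+   // carrying the new values. The metastore relocates the data, which is why
+   // the test above asserts that the old location suffix is gone.
+   private void renamePartitionTo(Partition part, List<String> newValues) throws Exception {
+     List<String> oldValues = new ArrayList<>(part.getValues());
+     part.setValues(newValues);
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues, part);
+   }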
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionTargetAlreadyExisting() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), oldParts.get(2));
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoSuchOldPartition() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("1", "2", ""), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullTableInPartition() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setTableName(null);
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2017", "11", "27"),
+             partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullDbInPartition() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setDbName(null);
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2017", "11", "27"),
+             partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionEmptyOldPartList() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList(), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNullOldPartList() throws Exception {
+     createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, TABLE_NAME, null, partToRename);
+   }
+ 
+   @Test
+   public void testRenamePartitionNullNewPart() throws Exception {
+     try {
+       List<List<String>> oldValues = createTable4PartColsParts(client);
+       List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short) -1);
+ 
+       Partition partToRename = oldParts.get(3);
+       partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+       client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), null);
+       fail("Should have thrown exception");
+     } catch (NullPointerException | TTransportException e) {
+       //TODO: should not throw different exceptions for different HMS deployment types
+     }
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionBogusCatalogName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition("nosuch", DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoDbName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition("", TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoTblName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, "", oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullDbName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(null, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionNullTblName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     client.renamePartition(DB_NAME, null, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionChangeTblName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setTableName(TABLE_NAME + "_2");
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = MetaException.class)
+   public void testRenamePartitionChangeDbName() throws Exception {
+     List<List<String>> oldValues = createTable4PartColsParts(client);
+     List<Partition> oldParts = client.listPartitions(DB_NAME, TABLE_NAME, (short)-1);
+ 
+     Partition partToRename = oldParts.get(3);
+     partToRename.setValues(Lists.newArrayList("2018", "01", "16"));
+     partToRename.setDbName(DB_NAME + "_2");
+     client.renamePartition(DB_NAME, TABLE_NAME, oldValues.get(3), partToRename);
+   }
+ 
+   @Test(expected = InvalidOperationException.class)
+   public void testRenamePartitionNoTable() throws Exception {
+     client.renamePartition(DB_NAME, TABLE_NAME, Lists.newArrayList("2018", "01", "16"),
+             new Partition());
+   }
+ 
+ }

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/pom.xml
----------------------------------------------------------------------


[91/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/93b9cdd6
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/93b9cdd6
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/93b9cdd6

Branch: refs/heads/master-txnstats
Commit: 93b9cdd6945c30a46dab77cdee894a15c3fd8ca5
Parents: b95dc96 20eb7b5
Author: sergey <se...@apache.org>
Authored: Thu Jul 12 18:39:42 2018 -0700
Committer: sergey <se...@apache.org>
Committed: Thu Jul 12 18:39:42 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |      1 +
 .../hs2connection/BeelineSiteParser.java        |      2 +-
 .../UserHS2ConnectionFileParser.java            |      2 +-
 .../hs2connection/TestBeelineSiteParser.java    |     41 +
 .../TestUserHS2ConnectionFileParser.java        |     16 +
 .../apache/hadoop/hive/conf/HiveConfUtil.java   |      6 +-
 .../JavaIOTmpdirVariableCoercion.java           |     10 +-
 .../org/apache/hive/http/JMXJsonServlet.java    |      6 +-
 .../hadoop/hive/druid/io/DruidRecordWriter.java |      2 +-
 .../hive/hbase/HiveHBaseInputFormatUtil.java    |      8 +-
 hcatalog/core/pom.xml                           |      2 +-
 hcatalog/webhcat/java-client/pom.xml            |      2 +-
 hcatalog/webhcat/svr/pom.xml                    |      2 +-
 .../hive/hcatalog/templeton/ListDelegator.java  |      8 +-
 itests/hcatalog-unit/pom.xml                    |      2 +-
 itests/hive-blobstore/pom.xml                   |      4 +-
 itests/hive-minikdc/pom.xml                     |      4 +-
 itests/hive-unit-hadoop2/pom.xml                |      2 +-
 itests/hive-unit/pom.xml                        |      4 +-
 itests/qtest-accumulo/pom.xml                   |      4 +-
 itests/qtest-spark/pom.xml                      |      4 +-
 itests/qtest/pom.xml                            |      4 +-
 .../test/resources/testconfiguration.properties |      2 +
 itests/util/pom.xml                             |      2 +-
 ...SQLStdHiveAuthorizationValidatorForTest.java |     12 +-
 llap-server/pom.xml                             |      2 +-
 .../services/impl/LlapIoMemoryServlet.java      |      6 +-
 .../llap/security/LlapServerSecurityInfo.java   |      8 +-
 .../endpoint/LlapPluginSecurityInfo.java        |      9 +-
 metastore/pom.xml                               |      2 +-
 packaging/src/main/assembly/bin.xml             |      2 +-
 packaging/src/main/assembly/src.xml             |      2 +-
 pom.xml                                         |     25 +-
 ql/pom.xml                                      |      4 +-
 .../ql/exec/spark/session/SparkSessionImpl.java |     10 +-
 .../hadoop/hive/ql/exec/tez/DagUtils.java       |    101 +-
 .../keyseries/VectorKeySeriesSingleImpl.java    |      5 +-
 .../exec/vector/ptf/VectorPTFEvaluatorBase.java |      7 -
 .../vector/ptf/VectorPTFEvaluatorCount.java     |      9 +-
 .../vector/ptf/VectorPTFEvaluatorCountStar.java |     12 +-
 .../ptf/VectorPTFEvaluatorDecimalAvg.java       |      8 +-
 .../VectorPTFEvaluatorDecimalFirstValue.java    |     10 +-
 .../ptf/VectorPTFEvaluatorDecimalLastValue.java |      9 +-
 .../ptf/VectorPTFEvaluatorDecimalMax.java       |      9 +-
 .../ptf/VectorPTFEvaluatorDecimalMin.java       |      9 +-
 .../ptf/VectorPTFEvaluatorDecimalSum.java       |      8 +-
 .../vector/ptf/VectorPTFEvaluatorDenseRank.java |      8 +-
 .../vector/ptf/VectorPTFEvaluatorDoubleAvg.java |      8 +-
 .../ptf/VectorPTFEvaluatorDoubleFirstValue.java |      9 +-
 .../ptf/VectorPTFEvaluatorDoubleLastValue.java  |     13 +-
 .../vector/ptf/VectorPTFEvaluatorDoubleMax.java |      8 +-
 .../vector/ptf/VectorPTFEvaluatorDoubleMin.java |      8 +-
 .../vector/ptf/VectorPTFEvaluatorDoubleSum.java |      8 +-
 .../vector/ptf/VectorPTFEvaluatorLongAvg.java   |      8 +-
 .../ptf/VectorPTFEvaluatorLongFirstValue.java   |      9 +-
 .../ptf/VectorPTFEvaluatorLongLastValue.java    |      8 +-
 .../vector/ptf/VectorPTFEvaluatorLongMax.java   |      8 +-
 .../vector/ptf/VectorPTFEvaluatorLongMin.java   |      8 +-
 .../vector/ptf/VectorPTFEvaluatorLongSum.java   |      8 +-
 .../exec/vector/ptf/VectorPTFEvaluatorRank.java |      8 +-
 .../vector/ptf/VectorPTFEvaluatorRowNumber.java |      8 +-
 .../exec/vector/ptf/VectorPTFGroupBatches.java  |      6 -
 .../ql/exec/vector/ptf/VectorPTFOperator.java   |     20 +-
 .../VectorReduceSinkCommonOperator.java         |      6 +-
 .../VectorReduceSinkEmptyKeyOperator.java       |      6 +-
 .../VectorReduceSinkLongOperator.java           |      4 -
 .../VectorReduceSinkMultiKeyOperator.java       |      4 -
 .../VectorReduceSinkObjectHashOperator.java     |      8 +-
 .../VectorReduceSinkStringOperator.java         |      5 -
 .../VectorReduceSinkUniformHashOperator.java    |      6 +-
 .../optimizer/FixedBucketPruningOptimizer.java  |      5 -
 .../ql/optimizer/PartitionColumnsSeparator.java |      6 +-
 .../TablePropertyEnrichmentOptimizer.java       |     27 +-
 .../rules/HivePointLookupOptimizerRule.java     |     13 +-
 .../org/apache/hadoop/hive/ql/parse/HiveLexer.g |      1 +
 .../apache/hadoop/hive/ql/parse/HiveParser.g    |      3 +
 .../hadoop/hive/ql/parse/IdentifiersParser.g    |      3 +-
 .../hadoop/hive/ql/parse/ParseDriver.java       |     11 +
 .../org/apache/hadoop/hive/ql/parse/QB.java     |      4 +
 .../hadoop/hive/ql/parse/SemanticAnalyzer.java  |     33 +-
 .../hadoop/hive/ql/parse/TezCompiler.java       |     72 +
 .../hadoop/hive/ql/plan/VectorPTFInfo.java      |      4 -
 .../hive/ql/plan/mapper/StatsSources.java       |      9 +-
 .../hadoop/hive/ql/stats/OperatorStats.java     |      7 +
 .../hadoop/hive/ql/udf/generic/BaseMaskUDF.java |     23 +-
 .../session/TestSparkSessionManagerImpl.java    |      5 +-
 .../TestSQL11ReservedKeyWordsNegative.java      |     13 +-
 .../hive/testutils/MiniZooKeeperCluster.java    |      8 +-
 ql/src/test/queries/clientpositive/masking_13.q |     28 +
 ql/src/test/queries/clientpositive/real.q       |      2 +
 ql/src/test/queries/clientpositive/reopt_dpp.q  |     62 +
 .../queries/clientpositive/reopt_semijoin.q     |     76 +
 .../clientpositive/results_cache_with_masking.q |      3 +-
 .../clientpositive/vector_delete_orig_table.q   |      2 +
 .../results/clientpositive/llap/reopt_dpp.q.out |    259 +
 .../clientpositive/llap/reopt_semijoin.q.out    |    420 +
 .../llap/results_cache_with_masking.q.out       |    677 +-
 .../test/results/clientpositive/masking_1.q.out |   7265 +-
 .../results/clientpositive/masking_12.q.out     |   1752 +-
 .../results/clientpositive/masking_13.q.out     |    208 +
 .../clientpositive/masking_1_newdb.q.out        |   1053 +-
 .../test/results/clientpositive/masking_2.q.out |   1140 +-
 .../test/results/clientpositive/masking_3.q.out |   6399 +-
 .../test/results/clientpositive/masking_4.q.out |    163 +-
 .../test/results/clientpositive/masking_5.q.out |    836 +-
 .../test/results/clientpositive/masking_6.q.out |   2068 +-
 .../test/results/clientpositive/masking_7.q.out |   2068 +-
 .../test/results/clientpositive/masking_8.q.out |   1645 +-
 .../clientpositive/masking_disablecbo_1.q.out   |   7277 +-
 .../clientpositive/masking_disablecbo_2.q.out   |   1225 +-
 .../clientpositive/masking_disablecbo_4.q.out   |     90 +-
 ql/src/test/results/clientpositive/real.q.out   |     16 +
 .../clientpositive/union_pos_alias.q.out        |   1658 +-
 .../vector_delete_orig_table.q.out              |     11 +-
 service/pom.xml                                 |      2 +-
 .../hive/http/JdbcJarDownloadServlet.java       |      6 +-
 .../java/org/apache/hive/http/LlapServlet.java  |      6 +-
 .../apache/hadoop/hive/shims/ShimLoader.java    |     16 +-
 standalone-metastore/DEV-README                 |      2 +-
 .../findbugs/findbugs-exclude.xml               |     24 +
 standalone-metastore/metastore-common/pom.xml   |    754 +
 .../metastore-common/src/assembly/bin.xml       |    136 +
 .../metastore-common/src/assembly/src.xml       |     53 +
 .../hive/metastore/api/AbortTxnRequest.java     |    497 +
 .../hive/metastore/api/AbortTxnsRequest.java    |    438 +
 .../api/AddCheckConstraintRequest.java          |    443 +
 .../api/AddDefaultConstraintRequest.java        |    443 +
 .../metastore/api/AddDynamicPartitions.java     |    959 +
 .../metastore/api/AddForeignKeyRequest.java     |    443 +
 .../api/AddNotNullConstraintRequest.java        |    443 +
 .../metastore/api/AddPartitionsRequest.java     |   1162 +
 .../hive/metastore/api/AddPartitionsResult.java |    550 +
 .../metastore/api/AddPrimaryKeyRequest.java     |    443 +
 .../api/AddUniqueConstraintRequest.java         |    443 +
 .../hadoop/hive/metastore/api/AggrStats.java    |    645 +
 .../api/AllocateTableWriteIdsRequest.java       |    915 +
 .../api/AllocateTableWriteIdsResponse.java      |    443 +
 .../metastore/api/AlreadyExistsException.java   |    395 +
 .../hive/metastore/api/AlterCatalogRequest.java |    504 +
 .../hive/metastore/api/AlterISchemaRequest.java |    509 +
 .../metastore/api/AlterPartitionsRequest.java   |   1067 +
 .../metastore/api/AlterPartitionsResponse.java  |    283 +
 .../hadoop/hive/metastore/api/BasicTxnInfo.java |    907 +
 .../metastore/api/BinaryColumnStatsData.java    |    696 +
 .../metastore/api/BooleanColumnStatsData.java   |    696 +
 .../metastore/api/CacheFileMetadataRequest.java |    703 +
 .../metastore/api/CacheFileMetadataResult.java  |    387 +
 .../hadoop/hive/metastore/api/Catalog.java      |    606 +
 .../metastore/api/CheckConstraintsRequest.java  |    591 +
 .../metastore/api/CheckConstraintsResponse.java |    443 +
 .../hive/metastore/api/CheckLockRequest.java    |    589 +
 .../metastore/api/ClearFileMetadataRequest.java |    438 +
 .../metastore/api/ClearFileMetadataResult.java  |    283 +
 .../hive/metastore/api/ClientCapabilities.java  |    441 +
 .../hive/metastore/api/ClientCapability.java    |     45 +
 .../hive/metastore/api/CmRecycleRequest.java    |    488 +
 .../hive/metastore/api/CmRecycleResponse.java   |    283 +
 .../hive/metastore/api/ColumnStatistics.java    |    863 +
 .../metastore/api/ColumnStatisticsData.java     |    675 +
 .../metastore/api/ColumnStatisticsDesc.java     |    904 +
 .../hive/metastore/api/ColumnStatisticsObj.java |    593 +
 .../hive/metastore/api/CommitTxnRequest.java    |    657 +
 .../hive/metastore/api/CompactionRequest.java   |    977 +
 .../hive/metastore/api/CompactionResponse.java  |    583 +
 .../hive/metastore/api/CompactionType.java      |     45 +
 .../api/ConfigValSecurityException.java         |    395 +
 .../metastore/api/CreateCatalogRequest.java     |    400 +
 .../hive/metastore/api/CreationMetadata.java    |    851 +
 .../api/CurrentNotificationEventId.java         |    387 +
 .../hive/metastore/api/DataOperationType.java   |     57 +
 .../hadoop/hive/metastore/api/Database.java     |   1201 +
 .../apache/hadoop/hive/metastore/api/Date.java  |    387 +
 .../hive/metastore/api/DateColumnStatsData.java |    823 +
 .../hadoop/hive/metastore/api/Decimal.java      |    497 +
 .../metastore/api/DecimalColumnStatsData.java   |    823 +
 .../api/DefaultConstraintsRequest.java          |    591 +
 .../api/DefaultConstraintsResponse.java         |    443 +
 .../metastore/api/DoubleColumnStatsData.java    |    799 +
 .../hive/metastore/api/DropCatalogRequest.java  |    395 +
 .../metastore/api/DropConstraintRequest.java    |    701 +
 .../hive/metastore/api/DropPartitionsExpr.java  |    505 +
 .../metastore/api/DropPartitionsRequest.java    |   1218 +
 .../metastore/api/DropPartitionsResult.java     |    447 +
 .../hive/metastore/api/EnvironmentContext.java  |    447 +
 .../hive/metastore/api/EventRequestType.java    |     48 +
 .../hadoop/hive/metastore/api/FieldSchema.java  |    603 +
 .../metastore/api/FileMetadataExprType.java     |     42 +
 .../metastore/api/FindSchemasByColsResp.java    |    449 +
 .../metastore/api/FindSchemasByColsRqst.java    |    605 +
 .../hive/metastore/api/FireEventRequest.java    |    967 +
 .../metastore/api/FireEventRequestData.java     |    309 +
 .../hive/metastore/api/FireEventResponse.java   |    283 +
 .../hive/metastore/api/ForeignKeysRequest.java  |    814 +
 .../hive/metastore/api/ForeignKeysResponse.java |    443 +
 .../hadoop/hive/metastore/api/Function.java     |   1306 +
 .../hadoop/hive/metastore/api/FunctionType.java |     42 +
 .../metastore/api/GetAllFunctionsResponse.java  |    447 +
 .../hive/metastore/api/GetCatalogRequest.java   |    395 +
 .../hive/metastore/api/GetCatalogResponse.java  |    400 +
 .../hive/metastore/api/GetCatalogsResponse.java |    444 +
 .../api/GetFileMetadataByExprRequest.java       |    773 +
 .../api/GetFileMetadataByExprResult.java        |    553 +
 .../metastore/api/GetFileMetadataRequest.java   |    438 +
 .../metastore/api/GetFileMetadataResult.java    |    540 +
 .../metastore/api/GetOpenTxnsInfoResponse.java  |    542 +
 .../hive/metastore/api/GetOpenTxnsResponse.java |    750 +
 .../api/GetPrincipalsInRoleRequest.java         |    389 +
 .../api/GetPrincipalsInRoleResponse.java        |    443 +
 .../api/GetRoleGrantsForPrincipalRequest.java   |    502 +
 .../api/GetRoleGrantsForPrincipalResponse.java  |    443 +
 .../metastore/api/GetRuntimeStatsRequest.java   |    482 +
 .../hive/metastore/api/GetSerdeRequest.java     |    395 +
 .../hive/metastore/api/GetTableRequest.java     |    922 +
 .../hive/metastore/api/GetTableResult.java      |    501 +
 .../hive/metastore/api/GetTablesRequest.java    |    765 +
 .../hive/metastore/api/GetTablesResult.java     |    443 +
 .../metastore/api/GetValidWriteIdsRequest.java  |    539 +
 .../metastore/api/GetValidWriteIdsResponse.java |    443 +
 .../api/GrantRevokePrivilegeRequest.java        |    620 +
 .../api/GrantRevokePrivilegeResponse.java       |    390 +
 .../metastore/api/GrantRevokeRoleRequest.java   |   1059 +
 .../metastore/api/GrantRevokeRoleResponse.java  |    390 +
 .../hive/metastore/api/GrantRevokeType.java     |     45 +
 .../hive/metastore/api/HeartbeatRequest.java    |    489 +
 .../metastore/api/HeartbeatTxnRangeRequest.java |    482 +
 .../api/HeartbeatTxnRangeResponse.java          |    588 +
 .../hive/metastore/api/HiveObjectPrivilege.java |    833 +
 .../hive/metastore/api/HiveObjectRef.java       |    979 +
 .../hive/metastore/api/HiveObjectType.java      |     54 +
 .../hadoop/hive/metastore/api/ISchema.java      |   1266 +
 .../hadoop/hive/metastore/api/ISchemaName.java  |    603 +
 .../metastore/api/InsertEventRequestData.java   |    855 +
 .../metastore/api/InvalidInputException.java    |    395 +
 .../metastore/api/InvalidObjectException.java   |    395 +
 .../api/InvalidOperationException.java          |    395 +
 .../api/InvalidPartitionException.java          |    395 +
 .../hive/metastore/api/LockComponent.java       |   1158 +
 .../hadoop/hive/metastore/api/LockLevel.java    |     48 +
 .../hadoop/hive/metastore/api/LockRequest.java  |    861 +
 .../hadoop/hive/metastore/api/LockResponse.java |    500 +
 .../hadoop/hive/metastore/api/LockState.java    |     51 +
 .../hadoop/hive/metastore/api/LockType.java     |     48 +
 .../hive/metastore/api/LongColumnStatsData.java |    799 +
 .../api/MapSchemaVersionToSerdeRequest.java     |    504 +
 .../hive/metastore/api/Materialization.java     |    750 +
 .../hive/metastore/api/MetaException.java       |    395 +
 .../hive/metastore/api/MetadataPpdResult.java   |    517 +
 .../hive/metastore/api/NoSuchLockException.java |    395 +
 .../metastore/api/NoSuchObjectException.java    |    395 +
 .../hive/metastore/api/NoSuchTxnException.java  |    395 +
 .../api/NotNullConstraintsRequest.java          |    591 +
 .../api/NotNullConstraintsResponse.java         |    443 +
 .../hive/metastore/api/NotificationEvent.java   |   1112 +
 .../metastore/api/NotificationEventRequest.java |    490 +
 .../api/NotificationEventResponse.java          |    443 +
 .../api/NotificationEventsCountRequest.java     |    598 +
 .../api/NotificationEventsCountResponse.java    |    387 +
 .../hive/metastore/api/OpenTxnRequest.java      |    963 +
 .../hive/metastore/api/OpenTxnsResponse.java    |    438 +
 .../apache/hadoop/hive/metastore/api/Order.java |    497 +
 .../hadoop/hive/metastore/api/Partition.java    |   1535 +
 .../hive/metastore/api/PartitionEventType.java  |     42 +
 .../api/PartitionListComposingSpec.java         |    449 +
 .../hive/metastore/api/PartitionSpec.java       |   1136 +
 .../api/PartitionSpecWithSharedSD.java          |    558 +
 .../metastore/api/PartitionValuesRequest.java   |   1328 +
 .../metastore/api/PartitionValuesResponse.java  |    443 +
 .../hive/metastore/api/PartitionValuesRow.java  |    438 +
 .../hive/metastore/api/PartitionWithoutSD.java  |   1016 +
 .../metastore/api/PartitionsByExprRequest.java  |    921 +
 .../metastore/api/PartitionsByExprResult.java   |    542 +
 .../metastore/api/PartitionsStatsRequest.java   |   1111 +
 .../metastore/api/PartitionsStatsResult.java    |    597 +
 .../hive/metastore/api/PrimaryKeysRequest.java  |    600 +
 .../hive/metastore/api/PrimaryKeysResponse.java |    443 +
 .../metastore/api/PrincipalPrivilegeSet.java    |    906 +
 .../hive/metastore/api/PrincipalType.java       |     48 +
 .../hadoop/hive/metastore/api/PrivilegeBag.java |    449 +
 .../hive/metastore/api/PrivilegeGrantInfo.java  |    815 +
 .../metastore/api/PutFileMetadataRequest.java   |    710 +
 .../metastore/api/PutFileMetadataResult.java    |    283 +
 .../api/ReplTblWriteIdStateRequest.java         |    952 +
 .../hive/metastore/api/RequestPartsSpec.java    |    438 +
 .../hadoop/hive/metastore/api/ResourceType.java |     48 +
 .../hadoop/hive/metastore/api/ResourceUri.java  |    511 +
 .../apache/hadoop/hive/metastore/api/Role.java  |    601 +
 .../hive/metastore/api/RolePrincipalGrant.java  |   1035 +
 .../hadoop/hive/metastore/api/RuntimeStat.java  |    600 +
 .../hive/metastore/api/SQLCheckConstraint.java  |   1213 +
 .../metastore/api/SQLDefaultConstraint.java     |   1213 +
 .../hive/metastore/api/SQLForeignKey.java       |   1822 +
 .../metastore/api/SQLNotNullConstraint.java     |   1109 +
 .../hive/metastore/api/SQLPrimaryKey.java       |   1210 +
 .../hive/metastore/api/SQLUniqueConstraint.java |   1207 +
 .../hadoop/hive/metastore/api/Schema.java       |    605 +
 .../hive/metastore/api/SchemaCompatibility.java |     51 +
 .../hadoop/hive/metastore/api/SchemaType.java   |     45 +
 .../hive/metastore/api/SchemaValidation.java    |     45 +
 .../hive/metastore/api/SchemaVersion.java       |   1412 +
 .../metastore/api/SchemaVersionDescriptor.java  |    502 +
 .../hive/metastore/api/SchemaVersionState.java  |     63 +
 .../hadoop/hive/metastore/api/SerDeInfo.java    |   1092 +
 .../hadoop/hive/metastore/api/SerdeType.java    |     45 +
 .../api/SetPartitionsStatsRequest.java          |    858 +
 .../api/SetSchemaVersionStateRequest.java       |    516 +
 .../hive/metastore/api/ShowCompactRequest.java  |    283 +
 .../hive/metastore/api/ShowCompactResponse.java |    443 +
 .../api/ShowCompactResponseElement.java         |   1641 +
 .../hive/metastore/api/ShowLocksRequest.java    |    710 +
 .../hive/metastore/api/ShowLocksResponse.java   |    449 +
 .../metastore/api/ShowLocksResponseElement.java |   1929 +
 .../hadoop/hive/metastore/api/SkewedInfo.java   |    834 +
 .../hive/metastore/api/StorageDescriptor.java   |   1748 +
 .../metastore/api/StringColumnStatsData.java    |    791 +
 .../apache/hadoop/hive/metastore/api/Table.java |   2483 +
 .../hadoop/hive/metastore/api/TableMeta.java    |    807 +
 .../hive/metastore/api/TableStatsRequest.java   |    961 +
 .../hive/metastore/api/TableStatsResult.java    |    550 +
 .../hive/metastore/api/TableValidWriteIds.java  |    851 +
 .../hive/metastore/api/ThriftHiveMetastore.java | 240381 ++++++++++++++++
 .../hive/metastore/api/TxnAbortedException.java |    395 +
 .../hadoop/hive/metastore/api/TxnInfo.java      |   1220 +
 .../hive/metastore/api/TxnOpenException.java    |    395 +
 .../hadoop/hive/metastore/api/TxnState.java     |     48 +
 .../hadoop/hive/metastore/api/TxnToWriteId.java |    482 +
 .../apache/hadoop/hive/metastore/api/Type.java  |    768 +
 .../metastore/api/UniqueConstraintsRequest.java |    591 +
 .../api/UniqueConstraintsResponse.java          |    443 +
 .../hive/metastore/api/UnknownDBException.java  |    395 +
 .../api/UnknownPartitionException.java          |    395 +
 .../metastore/api/UnknownTableException.java    |    395 +
 .../hive/metastore/api/UnlockRequest.java       |    387 +
 .../hadoop/hive/metastore/api/Version.java      |    499 +
 .../hive/metastore/api/WMAlterPoolRequest.java  |    504 +
 .../hive/metastore/api/WMAlterPoolResponse.java |    283 +
 .../api/WMAlterResourcePlanRequest.java         |    805 +
 .../api/WMAlterResourcePlanResponse.java        |    398 +
 .../metastore/api/WMAlterTriggerRequest.java    |    398 +
 .../metastore/api/WMAlterTriggerResponse.java   |    283 +
 ...CreateOrDropTriggerToPoolMappingRequest.java |    708 +
 ...reateOrDropTriggerToPoolMappingResponse.java |    283 +
 .../api/WMCreateOrUpdateMappingRequest.java     |    501 +
 .../api/WMCreateOrUpdateMappingResponse.java    |    283 +
 .../hive/metastore/api/WMCreatePoolRequest.java |    398 +
 .../metastore/api/WMCreatePoolResponse.java     |    283 +
 .../api/WMCreateResourcePlanRequest.java        |    504 +
 .../api/WMCreateResourcePlanResponse.java       |    283 +
 .../metastore/api/WMCreateTriggerRequest.java   |    398 +
 .../metastore/api/WMCreateTriggerResponse.java  |    283 +
 .../metastore/api/WMDropMappingRequest.java     |    398 +
 .../metastore/api/WMDropMappingResponse.java    |    283 +
 .../hive/metastore/api/WMDropPoolRequest.java   |    499 +
 .../hive/metastore/api/WMDropPoolResponse.java  |    283 +
 .../api/WMDropResourcePlanRequest.java          |    393 +
 .../api/WMDropResourcePlanResponse.java         |    283 +
 .../metastore/api/WMDropTriggerRequest.java     |    499 +
 .../metastore/api/WMDropTriggerResponse.java    |    283 +
 .../hive/metastore/api/WMFullResourcePlan.java  |   1033 +
 .../api/WMGetActiveResourcePlanRequest.java     |    283 +
 .../api/WMGetActiveResourcePlanResponse.java    |    398 +
 .../api/WMGetAllResourcePlanRequest.java        |    283 +
 .../api/WMGetAllResourcePlanResponse.java       |    447 +
 .../metastore/api/WMGetResourcePlanRequest.java |    393 +
 .../api/WMGetResourcePlanResponse.java          |    398 +
 .../api/WMGetTriggersForResourePlanRequest.java |    393 +
 .../WMGetTriggersForResourePlanResponse.java    |    447 +
 .../hadoop/hive/metastore/api/WMMapping.java    |    804 +
 .../hive/metastore/api/WMNullablePool.java      |    901 +
 .../metastore/api/WMNullableResourcePlan.java   |    918 +
 .../hadoop/hive/metastore/api/WMPool.java       |    802 +
 .../metastore/api/WMPoolSchedulingPolicy.java   |     45 +
 .../hive/metastore/api/WMPoolTrigger.java       |    490 +
 .../hive/metastore/api/WMResourcePlan.java      |    720 +
 .../metastore/api/WMResourcePlanStatus.java     |     48 +
 .../hadoop/hive/metastore/api/WMTrigger.java    |    809 +
 .../api/WMValidateResourcePlanRequest.java      |    393 +
 .../api/WMValidateResourcePlanResponse.java     |    597 +
 .../hive/metastore/api/WriteEventInfo.java      |   1012 +
 .../api/WriteNotificationLogRequest.java        |    949 +
 .../api/WriteNotificationLogResponse.java       |    283 +
 .../metastore/api/hive_metastoreConstants.java  |     89 +
 .../gen-php/metastore/ThriftHiveMetastore.php   |  60225 ++++
 .../src/gen/thrift/gen-php/metastore/Types.php  |  33017 +++
 .../src/gen/thrift/gen-py/__init__.py           |      0
 .../hive_metastore/ThriftHiveMetastore-remote   |   1641 +
 .../hive_metastore/ThriftHiveMetastore.py       |  49183 ++++
 .../thrift/gen-py/hive_metastore/__init__.py    |      1 +
 .../thrift/gen-py/hive_metastore/constants.py   |     36 +
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  23608 ++
 .../thrift/gen-rb/hive_metastore_constants.rb   |     59 +
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   5419 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |  13845 +
 .../hadoop/hive/common/StatsSetupConst.java     |    339 +
 .../common/classification/RetrySemantics.java   |     57 +
 .../common/ndv/NumDistinctValueEstimator.java   |     51 +
 .../ndv/NumDistinctValueEstimatorFactory.java   |     75 +
 .../hadoop/hive/common/ndv/fm/FMSketch.java     |    359 +
 .../hive/common/ndv/fm/FMSketchUtils.java       |    132 +
 .../hive/common/ndv/hll/HLLConstants.java       |    933 +
 .../hive/common/ndv/hll/HLLDenseRegister.java   |    202 +
 .../hadoop/hive/common/ndv/hll/HLLRegister.java |     50 +
 .../hive/common/ndv/hll/HLLSparseRegister.java  |    261 +
 .../hadoop/hive/common/ndv/hll/HyperLogLog.java |    664 +
 .../hive/common/ndv/hll/HyperLogLogUtils.java   |    409 +
 .../hive/metastore/AcidEventListener.java       |    146 +
 .../hive/metastore/AggregateStatsCache.java     |    571 +
 .../hadoop/hive/metastore/AlterHandler.java     |    203 +
 .../apache/hadoop/hive/metastore/Batchable.java |     86 +
 .../hadoop/hive/metastore/ColumnType.java       |    301 +
 .../hadoop/hive/metastore/DatabaseProduct.java  |     75 +
 .../apache/hadoop/hive/metastore/Deadline.java  |    172 +
 .../hive/metastore/DeadlineException.java       |     29 +
 .../hive/metastore/DefaultHiveMetaHook.java     |     51 +
 .../DefaultMetaStoreFilterHookImpl.java         |     93 +
 .../DefaultPartitionExpressionProxy.java        |     57 +
 .../metastore/DefaultStorageSchemaReader.java   |     38 +
 .../hadoop/hive/metastore/FileFormatProxy.java  |     64 +
 .../hive/metastore/FileMetadataHandler.java     |    109 +
 .../hive/metastore/FileMetadataManager.java     |    119 +
 .../hive/metastore/HMSMetricsListener.java      |     90 +
 .../hadoop/hive/metastore/HiveAlterHandler.java |    974 +
 .../hive/metastore/HiveMetaException.java       |     42 +
 .../hadoop/hive/metastore/HiveMetaHook.java     |    122 +
 .../hive/metastore/HiveMetaHookLoader.java      |     39 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |   9422 +
 .../hive/metastore/HiveMetaStoreClient.java     |   3435 +
 .../hive/metastore/HiveMetaStoreFsImpl.java     |     55 +
 .../hive/metastore/IExtrapolatePartStatus.java  |     85 +
 .../hadoop/hive/metastore/IHMSHandler.java      |    109 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   3740 +
 .../hive/metastore/IMetaStoreSchemaInfo.java    |    115 +
 .../metastore/LinearExtrapolatePartStatus.java  |    106 +
 .../hive/metastore/LockComponentBuilder.java    |    121 +
 .../hive/metastore/LockRequestBuilder.java      |    168 +
 .../MaterializationsCacheCleanerTask.java       |     63 +
 .../MaterializationsInvalidationCache.java      |    543 +
 .../MaterializationsRebuildLockCleanerTask.java |     61 +
 .../MaterializationsRebuildLockHandler.java     |    216 +
 .../hive/metastore/MetaStoreDirectSql.java      |   2817 +
 .../metastore/MetaStoreEndFunctionContext.java  |     59 +
 .../metastore/MetaStoreEndFunctionListener.java |     58 +
 .../hive/metastore/MetaStoreEventListener.java  |    306 +
 .../MetaStoreEventListenerConstants.java        |     41 +
 .../hadoop/hive/metastore/MetaStoreFS.java      |     43 +
 .../hive/metastore/MetaStoreFilterHook.java     |    147 +
 .../hadoop/hive/metastore/MetaStoreInit.java    |    109 +
 .../hive/metastore/MetaStoreInitContext.java    |     27 +
 .../hive/metastore/MetaStoreInitListener.java   |     49 +
 .../metastore/MetaStoreListenerNotifier.java    |    375 +
 .../metastore/MetaStorePreEventListener.java    |     57 +
 .../hive/metastore/MetaStoreSchemaInfo.java     |    246 +
 .../metastore/MetaStoreSchemaInfoFactory.java   |     64 +
 .../hadoop/hive/metastore/MetaStoreThread.java  |     58 +
 .../hadoop/hive/metastore/MetadataStore.java    |     52 +
 .../hive/metastore/MetastoreTaskThread.java     |     38 +
 .../hadoop/hive/metastore/ObjectStore.java      |  12377 +
 .../hive/metastore/PartFilterExprUtil.java      |    165 +
 .../hive/metastore/PartitionDropOptions.java    |     54 +
 .../metastore/PartitionExpressionProxy.java     |     73 +
 .../apache/hadoop/hive/metastore/RawStore.java  |   1718 +
 .../hadoop/hive/metastore/RawStoreProxy.java    |    114 +
 .../hive/metastore/ReplChangeManager.java       |    501 +
 .../hive/metastore/RetryingHMSHandler.java      |    232 +
 .../hive/metastore/RetryingMetaStoreClient.java |    341 +
 .../hive/metastore/RuntimeStatsCleanerTask.java |     66 +
 .../metastore/SessionPropertiesListener.java    |     46 +
 .../hive/metastore/StatObjectConverter.java     |    892 +
 .../hive/metastore/StorageSchemaReader.java     |     46 +
 .../hive/metastore/TServerSocketKeepAlive.java  |     47 +
 .../hive/metastore/TSetIpAddressProcessor.java  |     62 +
 .../hive/metastore/TUGIBasedProcessor.java      |    183 +
 .../apache/hadoop/hive/metastore/TableType.java |     26 +
 .../hadoop/hive/metastore/ThreadPool.java       |     63 +
 .../TransactionalMetaStoreEventListener.java    |     39 +
 .../TransactionalValidationListener.java        |    487 +
 .../apache/hadoop/hive/metastore/Warehouse.java |    756 +
 .../annotation/MetastoreVersionAnnotation.java  |     85 +
 .../hive/metastore/annotation/NoReconnect.java  |     29 +
 .../api/InitializeTableWriteIdsRequest.java     |     42 +
 .../hive/metastore/api/utils/DecimalUtils.java  |     49 +
 .../hive/metastore/cache/ByteArrayWrapper.java  |     45 +
 .../hadoop/hive/metastore/cache/CacheUtils.java |    136 +
 .../hive/metastore/cache/CachedStore.java       |   2530 +
 .../hive/metastore/cache/SharedCache.java       |   1650 +
 .../client/builder/CatalogBuilder.java          |     62 +
 .../client/builder/ConstraintBuilder.java       |    115 +
 .../client/builder/DatabaseBuilder.java         |    122 +
 .../client/builder/FunctionBuilder.java         |    143 +
 .../GrantRevokePrivilegeRequestBuilder.java     |     63 +
 .../builder/HiveObjectPrivilegeBuilder.java     |     69 +
 .../client/builder/HiveObjectRefBuilder.java    |     69 +
 .../client/builder/ISchemaBuilder.java          |    102 +
 .../client/builder/PartitionBuilder.java        |    119 +
 .../builder/PrivilegeGrantInfoBuilder.java      |     84 +
 .../metastore/client/builder/RoleBuilder.java   |     55 +
 .../builder/SQLCheckConstraintBuilder.java      |     51 +
 .../builder/SQLDefaultConstraintBuilder.java    |     51 +
 .../client/builder/SQLForeignKeyBuilder.java    |    103 +
 .../builder/SQLNotNullConstraintBuilder.java    |     52 +
 .../client/builder/SQLPrimaryKeyBuilder.java    |     52 +
 .../builder/SQLUniqueConstraintBuilder.java     |     46 +
 .../client/builder/SchemaVersionBuilder.java    |    114 +
 .../client/builder/SerdeAndColsBuilder.java     |    124 +
 .../builder/StorageDescriptorBuilder.java       |    163 +
 .../metastore/client/builder/TableBuilder.java  |    224 +
 .../aggr/BinaryColumnStatsAggregator.java       |     61 +
 .../aggr/BooleanColumnStatsAggregator.java      |     62 +
 .../columnstats/aggr/ColumnStatsAggregator.java |     35 +
 .../aggr/ColumnStatsAggregatorFactory.java      |    113 +
 .../aggr/DateColumnStatsAggregator.java         |    360 +
 .../aggr/DecimalColumnStatsAggregator.java      |    375 +
 .../aggr/DoubleColumnStatsAggregator.java       |    348 +
 .../aggr/IExtrapolatePartStatus.java            |     47 +
 .../aggr/LongColumnStatsAggregator.java         |    348 +
 .../aggr/StringColumnStatsAggregator.java       |    304 +
 .../cache/DateColumnStatsDataInspector.java     |    124 +
 .../cache/DecimalColumnStatsDataInspector.java  |    124 +
 .../cache/DoubleColumnStatsDataInspector.java   |    124 +
 .../cache/LongColumnStatsDataInspector.java     |    124 +
 .../cache/StringColumnStatsDataInspector.java   |    125 +
 .../merge/BinaryColumnStatsMerger.java          |     35 +
 .../merge/BooleanColumnStatsMerger.java         |     35 +
 .../columnstats/merge/ColumnStatsMerger.java    |     31 +
 .../merge/ColumnStatsMergerFactory.java         |    120 +
 .../merge/DateColumnStatsMerger.java            |     59 +
 .../merge/DecimalColumnStatsMerger.java         |     85 +
 .../merge/DoubleColumnStatsMerger.java          |     54 +
 .../merge/LongColumnStatsMerger.java            |     54 +
 .../merge/StringColumnStatsMerger.java          |     54 +
 .../metastore/conf/ConfTemplatePrinter.java     |    150 +
 .../hive/metastore/conf/EnumValidator.java      |     26 +
 .../hive/metastore/conf/MetastoreConf.java      |   1688 +
 .../hive/metastore/conf/RangeValidator.java     |     38 +
 .../hive/metastore/conf/SizeValidator.java      |    110 +
 .../hive/metastore/conf/StringSetValidator.java |     51 +
 .../hive/metastore/conf/TimeValidator.java      |     67 +
 .../hadoop/hive/metastore/conf/Validator.java   |     87 +
 .../datasource/BoneCPDataSourceProvider.java    |     87 +
 .../datasource/DataSourceProvider.java          |     79 +
 .../datasource/DataSourceProviderFactory.java   |     66 +
 .../datasource/DbCPDataSourceProvider.java      |    117 +
 .../datasource/HikariCPDataSourceProvider.java  |     89 +
 .../hive/metastore/datasource/package-info.java |     23 +
 .../hive/metastore/events/AbortTxnEvent.java    |     51 +
 .../hive/metastore/events/AcidWriteEvent.java   |     91 +
 .../metastore/events/AddForeignKeyEvent.java    |     41 +
 .../events/AddNotNullConstraintEvent.java       |     42 +
 .../metastore/events/AddPartitionEvent.java     |     84 +
 .../metastore/events/AddPrimaryKeyEvent.java    |     42 +
 .../metastore/events/AddSchemaVersionEvent.java |     40 +
 .../events/AddUniqueConstraintEvent.java        |     42 +
 .../metastore/events/AllocWriteIdEvent.java     |     57 +
 .../metastore/events/AlterCatalogEvent.java     |     44 +
 .../metastore/events/AlterDatabaseEvent.java    |     56 +
 .../metastore/events/AlterISchemaEvent.java     |     45 +
 .../metastore/events/AlterPartitionEvent.java   |     75 +
 .../events/AlterSchemaVersionEvent.java         |     46 +
 .../hive/metastore/events/AlterTableEvent.java  |     63 +
 .../hive/metastore/events/CommitTxnEvent.java   |     51 +
 .../metastore/events/ConfigChangeEvent.java     |     52 +
 .../metastore/events/CreateCatalogEvent.java    |     39 +
 .../metastore/events/CreateDatabaseEvent.java   |     43 +
 .../metastore/events/CreateFunctionEvent.java   |     43 +
 .../metastore/events/CreateISchemaEvent.java    |     39 +
 .../hive/metastore/events/CreateTableEvent.java |     43 +
 .../hive/metastore/events/DropCatalogEvent.java |     39 +
 .../metastore/events/DropConstraintEvent.java   |     57 +
 .../metastore/events/DropDatabaseEvent.java     |     43 +
 .../metastore/events/DropFunctionEvent.java     |     43 +
 .../hive/metastore/events/DropISchemaEvent.java |     39 +
 .../metastore/events/DropPartitionEvent.java    |     70 +
 .../events/DropSchemaVersionEvent.java          |     40 +
 .../hive/metastore/events/DropTableEvent.java   |     54 +
 .../hive/metastore/events/EventCleanerTask.java |     66 +
 .../hive/metastore/events/InsertEvent.java      |    132 +
 .../hive/metastore/events/ListenerEvent.java    |    187 +
 .../events/LoadPartitionDoneEvent.java          |     57 +
 .../hive/metastore/events/OpenTxnEvent.java     |     51 +
 .../metastore/events/PreAddPartitionEvent.java  |     79 +
 .../events/PreAddSchemaVersionEvent.java        |     39 +
 .../metastore/events/PreAlterCatalogEvent.java  |     40 +
 .../metastore/events/PreAlterDatabaseEvent.java |     47 +
 .../metastore/events/PreAlterISchemaEvent.java  |     44 +
 .../events/PreAlterPartitionEvent.java          |     65 +
 .../events/PreAlterSchemaVersionEvent.java      |     45 +
 .../metastore/events/PreAlterTableEvent.java    |     53 +
 .../events/PreAuthorizationCallEvent.java       |     33 +
 .../metastore/events/PreCreateCatalogEvent.java |     39 +
 .../events/PreCreateDatabaseEvent.java          |     43 +
 .../metastore/events/PreCreateISchemaEvent.java |     39 +
 .../metastore/events/PreCreateTableEvent.java   |     43 +
 .../metastore/events/PreDropCatalogEvent.java   |     39 +
 .../metastore/events/PreDropDatabaseEvent.java  |     43 +
 .../metastore/events/PreDropISchemaEvent.java   |     39 +
 .../metastore/events/PreDropPartitionEvent.java |     67 +
 .../events/PreDropSchemaVersionEvent.java       |     39 +
 .../metastore/events/PreDropTableEvent.java     |     55 +
 .../hive/metastore/events/PreEventContext.java  |     82 +
 .../events/PreLoadPartitionDoneEvent.java       |     64 +
 .../metastore/events/PreReadCatalogEvent.java   |     39 +
 .../metastore/events/PreReadDatabaseEvent.java  |     46 +
 .../metastore/events/PreReadISchemaEvent.java   |     39 +
 .../metastore/events/PreReadTableEvent.java     |     47 +
 .../events/PreReadhSchemaVersionEvent.java      |     36 +
 .../metastore/hooks/JDOConnectionURLHook.java   |     52 +
 .../hive/metastore/hooks/URIResolverHook.java   |     37 +
 .../metastore/messaging/AbortTxnMessage.java    |     36 +
 .../metastore/messaging/AcidWriteMessage.java   |     50 +
 .../messaging/AddForeignKeyMessage.java         |     36 +
 .../messaging/AddNotNullConstraintMessage.java  |     36 +
 .../messaging/AddPartitionMessage.java          |     68 +
 .../messaging/AddPrimaryKeyMessage.java         |     35 +
 .../messaging/AddUniqueConstraintMessage.java   |     36 +
 .../messaging/AllocWriteIdMessage.java          |     36 +
 .../messaging/AlterCatalogMessage.java          |     29 +
 .../messaging/AlterDatabaseMessage.java         |     36 +
 .../messaging/AlterPartitionMessage.java        |     69 +
 .../metastore/messaging/AlterTableMessage.java  |     58 +
 .../metastore/messaging/CommitTxnMessage.java   |     59 +
 .../messaging/CreateCatalogMessage.java         |     25 +
 .../messaging/CreateDatabaseMessage.java        |     31 +
 .../messaging/CreateFunctionMessage.java        |     46 +
 .../metastore/messaging/CreateTableMessage.java |     53 +
 .../metastore/messaging/DropCatalogMessage.java |     25 +
 .../messaging/DropConstraintMessage.java        |     29 +
 .../messaging/DropDatabaseMessage.java          |     27 +
 .../messaging/DropFunctionMessage.java          |     38 +
 .../messaging/DropPartitionMessage.java         |     49 +
 .../metastore/messaging/DropTableMessage.java   |     46 +
 .../hive/metastore/messaging/EventMessage.java  |    127 +
 .../hive/metastore/messaging/EventUtils.java    |    202 +
 .../hive/metastore/messaging/InsertMessage.java |     75 +
 .../messaging/MessageDeserializer.java          |    200 +
 .../metastore/messaging/MessageFactory.java     |    341 +
 .../metastore/messaging/OpenTxnMessage.java     |     38 +
 .../metastore/messaging/PartitionFiles.java     |     53 +
 .../messaging/event/filters/AndFilter.java      |     39 +
 .../messaging/event/filters/BasicFilter.java    |     33 +
 .../event/filters/DatabaseAndTableFilter.java   |     65 +
 .../event/filters/EventBoundaryFilter.java      |     34 +
 .../event/filters/MessageFormatFilter.java      |     36 +
 .../messaging/json/JSONAbortTxnMessage.java     |     88 +
 .../messaging/json/JSONAcidWriteMessage.java    |    150 +
 .../json/JSONAddForeignKeyMessage.java          |    102 +
 .../json/JSONAddNotNullConstraintMessage.java   |     97 +
 .../messaging/json/JSONAddPartitionMessage.java |    175 +
 .../json/JSONAddPrimaryKeyMessage.java          |    102 +
 .../json/JSONAddUniqueConstraintMessage.java    |     99 +
 .../messaging/json/JSONAllocWriteIdMessage.java |    113 +
 .../messaging/json/JSONAlterCatalogMessage.java |     90 +
 .../json/JSONAlterDatabaseMessage.java          |     97 +
 .../json/JSONAlterPartitionMessage.java         |    153 +
 .../messaging/json/JSONAlterTableMessage.java   |    128 +
 .../messaging/json/JSONCommitTxnMessage.java    |    183 +
 .../json/JSONCreateCatalogMessage.java          |     80 +
 .../json/JSONCreateDatabaseMessage.java         |     85 +
 .../json/JSONCreateFunctionMessage.java         |     87 +
 .../messaging/json/JSONCreateTableMessage.java  |    134 +
 .../messaging/json/JSONDropCatalogMessage.java  |     67 +
 .../json/JSONDropConstraintMessage.java         |     91 +
 .../messaging/json/JSONDropDatabaseMessage.java |     72 +
 .../messaging/json/JSONDropFunctionMessage.java |     79 +
 .../json/JSONDropPartitionMessage.java          |    135 +
 .../messaging/json/JSONDropTableMessage.java    |    121 +
 .../messaging/json/JSONInsertMessage.java       |    148 +
 .../messaging/json/JSONMessageDeserializer.java |    273 +
 .../messaging/json/JSONMessageFactory.java      |    402 +
 .../messaging/json/JSONOpenTxnMessage.java      |    106 +
 .../hive/metastore/metrics/JsonReporter.java    |    223 +
 .../hive/metastore/metrics/JvmPauseMonitor.java |    222 +
 .../hadoop/hive/metastore/metrics/Metrics.java  |    244 +
 .../metastore/metrics/MetricsConstants.java     |     46 +
 .../hive/metastore/metrics/PerfLogger.java      |    194 +
 .../hadoop/hive/metastore/model/MCatalog.java   |     58 +
 .../hive/metastore/model/MColumnDescriptor.java |     51 +
 .../hive/metastore/model/MConstraint.java       |    214 +
 .../hive/metastore/model/MCreationMetadata.java |     87 +
 .../hive/metastore/model/MDBPrivilege.java      |    142 +
 .../hadoop/hive/metastore/model/MDatabase.java  |    157 +
 .../hive/metastore/model/MDelegationToken.java  |     45 +
 .../hive/metastore/model/MFieldSchema.java      |     80 +
 .../hadoop/hive/metastore/model/MFunction.java  |    119 +
 .../hive/metastore/model/MGlobalPrivilege.java  |    130 +
 .../hadoop/hive/metastore/model/MISchema.java   |    107 +
 .../hadoop/hive/metastore/model/MIndex.java     |    200 +
 .../hadoop/hive/metastore/model/MMasterKey.java |     55 +
 .../metastore/model/MMetastoreDBProperties.java |     56 +
 .../hive/metastore/model/MNotificationLog.java  |    108 +
 .../metastore/model/MNotificationNextId.java    |     42 +
 .../hadoop/hive/metastore/model/MOrder.java     |     62 +
 .../hadoop/hive/metastore/model/MPartition.java |    162 +
 .../model/MPartitionColumnPrivilege.java        |    171 +
 .../model/MPartitionColumnStatistics.java       |    281 +
 .../hive/metastore/model/MPartitionEvent.java   |     97 +
 .../metastore/model/MPartitionPrivilege.java    |    149 +
 .../hive/metastore/model/MPrincipalDesc.java    |     59 +
 .../hive/metastore/model/MResourceUri.java      |     49 +
 .../hadoop/hive/metastore/model/MRole.java      |     80 +
 .../hadoop/hive/metastore/model/MRoleMap.java   |    120 +
 .../hive/metastore/model/MRuntimeStat.java      |     59 +
 .../hive/metastore/model/MSchemaVersion.java    |    127 +
 .../hadoop/hive/metastore/model/MSerDeInfo.java |    127 +
 .../metastore/model/MStorageDescriptor.java     |    277 +
 .../hive/metastore/model/MStringList.java       |     62 +
 .../hadoop/hive/metastore/model/MTable.java     |    283 +
 .../metastore/model/MTableColumnPrivilege.java  |    170 +
 .../metastore/model/MTableColumnStatistics.java |    272 +
 .../hive/metastore/model/MTablePrivilege.java   |    149 +
 .../model/MTxnWriteNotificationLog.java         |    123 +
 .../hadoop/hive/metastore/model/MType.java      |    105 +
 .../hive/metastore/model/MVersionTable.java     |     57 +
 .../hadoop/hive/metastore/model/MWMMapping.java |     83 +
 .../hadoop/hive/metastore/model/MWMPool.java    |     89 +
 .../hive/metastore/model/MWMResourcePlan.java   |    105 +
 .../hadoop/hive/metastore/model/MWMTrigger.java |     89 +
 .../hive/metastore/parser/ExpressionTree.java   |    606 +
 .../hadoop/hive/metastore/parser/Filter.g       |    486 +
 .../hive/metastore/parser/package-info.java     |     23 +
 .../spec/CompositePartitionSpecProxy.java       |    258 +
 .../spec/PartitionListComposingSpecProxy.java   |    209 +
 .../partition/spec/PartitionSpecProxy.java      |    220 +
 .../spec/PartitionSpecWithSharedSDProxy.java    |    192 +
 .../hive/metastore/security/DBTokenStore.java   |    180 +
 .../security/DelegationTokenIdentifier.java     |     52 +
 .../security/DelegationTokenSecretManager.java  |    134 +
 .../security/DelegationTokenSelector.java       |     33 +
 .../security/DelegationTokenStore.java          |    116 +
 .../metastore/security/DelegationTokenTool.java |    252 +
 .../security/HadoopThriftAuthBridge.java        |    700 +
 .../security/HadoopThriftAuthBridge23.java      |    114 +
 .../metastore/security/MemoryTokenStore.java    |    118 +
 .../MetastoreDelegationTokenManager.java        |    180 +
 .../metastore/security/TFilterTransport.java    |     99 +
 .../security/TUGIAssumingTransport.java         |     73 +
 .../security/TUGIContainingTransport.java       |     96 +
 .../TokenStoreDelegationTokenSecretManager.java |    334 +
 .../metastore/security/ZooKeeperTokenStore.java |    474 +
 .../hive/metastore/tools/HiveMetaTool.java      |    490 +
 .../hive/metastore/tools/HiveSchemaHelper.java  |    673 +
 .../metastore/tools/MetastoreSchemaTool.java    |    460 +
 .../hive/metastore/tools/SQLGenerator.java      |    187 +
 .../metastore/tools/SchemaToolCommandLine.java  |    308 +
 .../hive/metastore/tools/SchemaToolTask.java    |     32 +
 .../tools/SchemaToolTaskAlterCatalog.java       |     90 +
 .../tools/SchemaToolTaskCreateCatalog.java      |    132 +
 .../tools/SchemaToolTaskCreateUser.java         |    115 +
 .../metastore/tools/SchemaToolTaskInfo.java     |     43 +
 .../metastore/tools/SchemaToolTaskInit.java     |     73 +
 .../tools/SchemaToolTaskMoveDatabase.java       |     96 +
 .../tools/SchemaToolTaskMoveTable.java          |    142 +
 .../metastore/tools/SchemaToolTaskUpgrade.java  |    116 +
 .../metastore/tools/SchemaToolTaskValidate.java |    630 +
 .../hadoop/hive/metastore/tools/SmokeTest.java  |    102 +
 .../txn/AcidCompactionHistoryService.java       |     71 +
 .../metastore/txn/AcidHouseKeeperService.java   |     71 +
 .../txn/AcidOpenTxnsCounterService.java         |     72 +
 .../hive/metastore/txn/AcidWriteSetService.java |     69 +
 .../hive/metastore/txn/CompactionInfo.java      |    170 +
 .../metastore/txn/CompactionTxnHandler.java     |   1158 +
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |    599 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   4949 +
 .../hadoop/hive/metastore/txn/TxnStore.java     |    496 +
 .../hadoop/hive/metastore/txn/TxnUtils.java     |    481 +
 .../hive/metastore/utils/CommonCliOptions.java  |    160 +
 .../hadoop/hive/metastore/utils/FileUtils.java  |    537 +
 .../hadoop/hive/metastore/utils/HdfsUtils.java  |    395 +
 .../metastore/utils/HiveStrictManagedUtils.java |    100 +
 .../hadoop/hive/metastore/utils/JavaUtils.java  |    130 +
 .../hadoop/hive/metastore/utils/LogUtils.java   |    140 +
 .../hive/metastore/utils/MetaStoreUtils.java    |   1840 +
 .../metastore/utils/MetastoreVersionInfo.java   |    133 +
 .../hadoop/hive/metastore/utils/ObjectPair.java |     86 +
 .../hive/metastore/utils/SecurityUtils.java     |    313 +
 .../hive/metastore/utils/StringUtils.java       |    130 +
 .../hive/metastore/utils/StringableMap.java     |     80 +
 .../MetastoreDelegationTokenSupport.java        |     68 +
 .../hadoop/hive/metastore/metastore.proto       |     29 +
 .../main/resources/datanucleus-log4j.properties |     17 +
 .../main/resources/metastore-log4j2.properties  |     71 +
 .../src/main/resources/metastore-site.xml       |     34 +
 .../src/main/resources/package.jdo              |   1426 +
 .../src/main/resources/saveVersion.sh           |     91 +
 .../src/main/resources/thrift-replacements.txt  |    106 +
 .../metastore-common/src/main/scripts/base      |    231 +
 .../src/main/scripts/ext/metastore.sh           |     41 +
 .../src/main/scripts/ext/schemaTool.sh          |     33 +
 .../src/main/scripts/ext/smokeTest.sh           |     33 +
 .../src/main/scripts/metastore-config.sh        |     69 +
 .../src/main/scripts/schematool                 |     21 +
 .../src/main/scripts/start-metastore            |     22 +
 .../main/sql/derby/hive-schema-1.2.0.derby.sql  |    405 +
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |    692 +
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |    710 +
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |    710 +
 .../sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql  |     62 +
 .../sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql  |     22 +
 .../sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql  |     59 +
 .../sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql  |      5 +
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |    283 +
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |     49 +
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |      8 +
 .../src/main/sql/derby/upgrade.order.derby      |     18 +
 .../src/main/sql/mssql/create-user.mssql.sql    |      5 +
 .../main/sql/mssql/hive-schema-1.2.0.mssql.sql  |    947 +
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |   1246 +
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |   1271 +
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |   1272 +
 .../sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql  |     73 +
 .../sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |     39 +
 .../sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql  |     43 +
 .../sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql  |      7 +
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |    352 +
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |     51 +
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |     10 +
 .../src/main/sql/mssql/upgrade.order.mssql      |     12 +
 .../src/main/sql/mysql/create-user.mysql.sql    |      8 +
 .../main/sql/mysql/hive-schema-1.2.0.mysql.sql  |    910 +
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  |   1183 +
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |   1208 +
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |   1210 +
 .../sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql  |     75 +
 .../sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |     42 +
 .../sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql  |     43 +
 .../sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql  |      8 +
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |    326 +
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |     51 +
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |     10 +
 .../src/main/sql/mysql/upgrade.order.mysql      |     18 +
 .../src/main/sql/oracle/create-user.oracle.sql  |      3 +
 .../sql/oracle/hive-schema-1.2.0.oracle.sql     |    856 +
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     |   1140 +
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |   1165 +
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |   1167 +
 .../oracle/upgrade-1.2.0-to-2.0.0.oracle.sql    |     83 +
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql    |     39 +
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |     58 +
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |      7 +
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |    343 +
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |     51 +
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |      9 +
 .../src/main/sql/oracle/upgrade.order.oracle    |     14 +
 .../main/sql/postgres/create-user.postgres.sql  |      2 +
 .../sql/postgres/hive-schema-1.2.0.postgres.sql |   1562 +
 .../sql/postgres/hive-schema-3.0.0.postgres.sql |   1827 +
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |   1856 +
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |   1860 +
 .../upgrade-1.2.0-to-2.0.0.postgres.sql         |     73 +
 .../upgrade-2.0.0-to-2.1.0.postgres.sql         |     40 +
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |     39 +
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |      8 +
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |    360 +
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |     53 +
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |     10 +
 .../main/sql/postgres/upgrade.order.postgres    |     18 +
 .../src/main/thrift/hive_metastore.thrift       |   2318 +
 .../hadoop/hive/common/TestStatsSetupConst.java |    114 +
 .../ndv/fm/TestFMSketchSerialization.java       |    101 +
 .../hive/common/ndv/hll/TestHLLNoBias.java      |    117 +
 .../common/ndv/hll/TestHLLSerialization.java    |    270 +
 .../hive/common/ndv/hll/TestHyperLogLog.java    |    338 +
 .../common/ndv/hll/TestHyperLogLogDense.java    |     85 +
 .../common/ndv/hll/TestHyperLogLogMerge.java    |    147 +
 .../common/ndv/hll/TestHyperLogLogSparse.java   |     84 +
 .../common/ndv/hll/TestSparseEncodeHash.java    |     59 +
 .../metastore/AlternateFailurePreListener.java  |     62 +
 .../metastore/DummyEndFunctionListener.java     |     47 +
 .../metastore/DummyJdoConnectionUrlHook.java    |     45 +
 .../hadoop/hive/metastore/DummyListener.java    |    126 +
 .../metastore/DummyMetaStoreInitListener.java   |     39 +
 .../hadoop/hive/metastore/DummyPreListener.java |     49 +
 .../DummyRawStoreControlledCommit.java          |   1268 +
 .../DummyRawStoreForJdoConnection.java          |   1247 +
 .../apache/hadoop/hive/metastore/FakeDerby.java |    404 +
 .../HiveMetaStoreClientPreCatalog.java          |   3535 +
 .../InjectableBehaviourObjectStore.java         |    218 +
 .../hive/metastore/IpAddressListener.java       |    102 +
 .../hive/metastore/MetaStoreTestUtils.java      |    291 +
 .../MockPartitionExpressionForMetastore.java    |     58 +
 .../hive/metastore/NonCatCallsWithCatalog.java  |   1158 +
 .../hadoop/hive/metastore/TestAdminUser.java    |     49 +
 .../hive/metastore/TestAggregateStatsCache.java |    272 +
 .../metastore/TestCatalogNonDefaultClient.java  |     74 +
 .../metastore/TestCatalogNonDefaultSvr.java     |     68 +
 .../hive/metastore/TestCatalogOldClient.java    |     44 +
 .../hadoop/hive/metastore/TestDeadline.java     |    130 +
 .../metastore/TestEmbeddedHiveMetaStore.java    |     51 +
 .../hadoop/hive/metastore/TestFilterHooks.java  |    254 +
 .../hive/metastore/TestHiveAlterHandler.java    |    121 +
 .../hive/metastore/TestHiveMetaStore.java       |   3103 +
 .../metastore/TestHiveMetaStoreGetMetaConf.java |    115 +
 .../TestHiveMetaStorePartitionSpecs.java        |    383 +
 .../TestHiveMetaStoreSchemaMethods.java         |   1248 +
 .../metastore/TestHiveMetaStoreTimeout.java     |    142 +
 .../hive/metastore/TestHiveMetaStoreTxns.java   |    267 +
 ...TestHiveMetaStoreWithEnvironmentContext.java |    191 +
 .../hive/metastore/TestHiveMetastoreCli.java    |     68 +
 .../hive/metastore/TestLockRequestBuilder.java  |    587 +
 .../hive/metastore/TestMarkPartition.java       |    118 +
 .../hive/metastore/TestMarkPartitionRemote.java |     34 +
 .../TestMetaStoreConnectionUrlHook.java         |     49 +
 .../TestMetaStoreEndFunctionListener.java       |    146 +
 .../metastore/TestMetaStoreEventListener.java   |    471 +
 .../TestMetaStoreEventListenerOnlyOnCommit.java |    121 +
 .../TestMetaStoreEventListenerWithOldConf.java  |    129 +
 .../metastore/TestMetaStoreInitListener.java    |     56 +
 .../metastore/TestMetaStoreListenersError.java  |     97 +
 ...stMetaStoreMaterializationsCacheCleaner.java |    328 +
 .../metastore/TestMetaStoreSchemaFactory.java   |     72 +
 .../hive/metastore/TestMetaStoreSchemaInfo.java |     55 +
 .../hadoop/hive/metastore/TestObjectStore.java  |    904 +
 .../metastore/TestObjectStoreInitRetry.java     |    135 +
 .../metastore/TestObjectStoreSchemaMethods.java |    602 +
 .../hadoop/hive/metastore/TestOldSchema.java    |    233 +
 .../TestPartitionNameWhitelistValidation.java   |    125 +
 .../hive/metastore/TestRawStoreProxy.java       |     67 +
 .../hive/metastore/TestRemoteHiveMetaStore.java |     64 +
 .../TestRemoteHiveMetaStoreIpAddress.java       |     66 +
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |     31 +
 .../TestRetriesInRetryingHMSHandler.java        |    111 +
 .../hive/metastore/TestRetryingHMSHandler.java  |     82 +
 .../metastore/TestSetUGIOnBothClientServer.java |     34 +
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |     35 +
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |     35 +
 .../apache/hadoop/hive/metastore/TestStats.java |    732 +
 .../hive/metastore/VerifyingObjectStore.java    |    219 +
 .../annotation/MetastoreCheckinTest.java        |     25 +
 .../metastore/annotation/MetastoreTest.java     |     24 +
 .../metastore/annotation/MetastoreUnitTest.java |     25 +
 .../hive/metastore/cache/TestCachedStore.java   |   1075 +
 .../metastore/cache/TestCatalogCaching.java     |    142 +
 .../metastore/client/MetaStoreClientTest.java   |     95 +
 .../client/MetaStoreFactoryForTests.java        |    112 +
 .../metastore/client/TestAddPartitions.java     |   1736 +
 .../client/TestAddPartitionsFromPartSpec.java   |   1267 +
 .../metastore/client/TestAlterPartitions.java   |   1130 +
 .../metastore/client/TestAppendPartitions.java  |    594 +
 .../hive/metastore/client/TestCatalogs.java     |    267 +
 .../metastore/client/TestCheckConstraint.java   |    363 +
 .../hive/metastore/client/TestDatabases.java    |    634 +
 .../metastore/client/TestDefaultConstraint.java |    363 +
 .../metastore/client/TestDropPartitions.java    |    659 +
 .../client/TestExchangePartitions.java          |   1337 +
 .../hive/metastore/client/TestForeignKey.java   |    538 +
 .../hive/metastore/client/TestFunctions.java    |    765 +
 .../metastore/client/TestGetPartitions.java     |    608 +
 .../hive/metastore/client/TestGetTableMeta.java |    330 +
 .../metastore/client/TestListPartitions.java    |   1522 +
 .../metastore/client/TestNotNullConstraint.java |    355 +
 .../hive/metastore/client/TestPrimaryKey.java   |    468 +
 .../hive/metastore/client/TestRuntimeStats.java |    154 +
 .../TestTablesCreateDropAlterTruncate.java      |   1384 +
 .../metastore/client/TestTablesGetExists.java   |    514 +
 .../hive/metastore/client/TestTablesList.java   |    320 +
 .../metastore/client/TestUniqueConstraint.java  |    356 +
 .../hive/metastore/client/package-info.java     |     22 +
 .../merge/DecimalColumnStatsMergerTest.java     |    235 +
 .../hive/metastore/conf/TestMetastoreConf.java  |    433 +
 .../TestDataSourceProviderFactory.java          |    248 +
 .../hive/metastore/dbinstall/DbInstallBase.java |    265 +
 .../hive/metastore/dbinstall/ITestMysql.java    |     82 +
 .../hive/metastore/dbinstall/ITestOracle.java   |     83 +
 .../hive/metastore/dbinstall/ITestPostgres.java |     82 +
 .../metastore/dbinstall/ITestSqlServer.java     |     84 +
 .../json/TestJSONMessageDeserializer.java       |    115 +
 .../hive/metastore/metrics/TestMetrics.java     |    164 +
 .../minihms/AbstractMetaStoreService.java       |    173 +
 .../minihms/ClusterMetaStoreForTests.java       |     32 +
 .../minihms/EmbeddedMetaStoreForTests.java      |     33 +
 .../hadoop/hive/metastore/minihms/MiniHMS.java  |     76 +
 .../minihms/RemoteMetaStoreForTests.java        |     43 +
 .../hive/metastore/minihms/package-info.java    |     23 +
 .../tools/TestMetastoreSchemaTool.java          |     70 +
 .../tools/TestSchemaToolForMetastore.java       |    534 +
 .../metastore/txn/TestTxnHandlerNegative.java   |     58 +
 .../hadoop/hive/metastore/txn/TestTxnUtils.java |    239 +
 .../hive/metastore/utils/TestHdfsUtils.java     |    348 +
 .../metastore/utils/TestMetaStoreUtils.java     |    291 +
 .../src/test/resources/log4j2.properties        |     35 +
 standalone-metastore/pom.xml                    |   1000 +-
 standalone-metastore/src/assembly/bin.xml       |    136 -
 standalone-metastore/src/assembly/src.xml       |     53 -
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |  95283 ------
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  30210 --
 .../ThriftHiveMetastore_server.skeleton.cpp     |   1084 -
 .../thrift/gen-cpp/hive_metastore_constants.cpp |     67 -
 .../thrift/gen-cpp/hive_metastore_constants.h   |     49 -
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  34088 ---
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  14473 -
 .../hive/metastore/api/AbortTxnRequest.java     |    497 -
 .../hive/metastore/api/AbortTxnsRequest.java    |    438 -
 .../api/AddCheckConstraintRequest.java          |    443 -
 .../api/AddDefaultConstraintRequest.java        |    443 -
 .../metastore/api/AddDynamicPartitions.java     |    959 -
 .../metastore/api/AddForeignKeyRequest.java     |    443 -
 .../api/AddNotNullConstraintRequest.java        |    443 -
 .../metastore/api/AddPartitionsRequest.java     |   1162 -
 .../hive/metastore/api/AddPartitionsResult.java |    550 -
 .../metastore/api/AddPrimaryKeyRequest.java     |    443 -
 .../api/AddUniqueConstraintRequest.java         |    443 -
 .../hadoop/hive/metastore/api/AggrStats.java    |    645 -
 .../api/AllocateTableWriteIdsRequest.java       |    915 -
 .../api/AllocateTableWriteIdsResponse.java      |    443 -
 .../metastore/api/AlreadyExistsException.java   |    395 -
 .../hive/metastore/api/AlterCatalogRequest.java |    504 -
 .../hive/metastore/api/AlterISchemaRequest.java |    509 -
 .../metastore/api/AlterPartitionsRequest.java   |   1067 -
 .../metastore/api/AlterPartitionsResponse.java  |    283 -
 .../hadoop/hive/metastore/api/BasicTxnInfo.java |    907 -
 .../metastore/api/BinaryColumnStatsData.java    |    696 -
 .../metastore/api/BooleanColumnStatsData.java   |    696 -
 .../metastore/api/CacheFileMetadataRequest.java |    703 -
 .../metastore/api/CacheFileMetadataResult.java  |    387 -
 .../hadoop/hive/metastore/api/Catalog.java      |    606 -
 .../metastore/api/CheckConstraintsRequest.java  |    591 -
 .../metastore/api/CheckConstraintsResponse.java |    443 -
 .../hive/metastore/api/CheckLockRequest.java    |    589 -
 .../metastore/api/ClearFileMetadataRequest.java |    438 -
 .../metastore/api/ClearFileMetadataResult.java  |    283 -
 .../hive/metastore/api/ClientCapabilities.java  |    441 -
 .../hive/metastore/api/ClientCapability.java    |     45 -
 .../hive/metastore/api/CmRecycleRequest.java    |    488 -
 .../hive/metastore/api/CmRecycleResponse.java   |    283 -
 .../hive/metastore/api/ColumnStatistics.java    |    863 -
 .../metastore/api/ColumnStatisticsData.java     |    675 -
 .../metastore/api/ColumnStatisticsDesc.java     |    904 -
 .../hive/metastore/api/ColumnStatisticsObj.java |    593 -
 .../hive/metastore/api/CommitTxnRequest.java    |    657 -
 .../hive/metastore/api/CompactionRequest.java   |    977 -
 .../hive/metastore/api/CompactionResponse.java  |    583 -
 .../hive/metastore/api/CompactionType.java      |     45 -
 .../api/ConfigValSecurityException.java         |    395 -
 .../metastore/api/CreateCatalogRequest.java     |    400 -
 .../hive/metastore/api/CreationMetadata.java    |    851 -
 .../api/CurrentNotificationEventId.java         |    387 -
 .../hive/metastore/api/DataOperationType.java   |     57 -
 .../hadoop/hive/metastore/api/Database.java     |   1201 -
 .../apache/hadoop/hive/metastore/api/Date.java  |    387 -
 .../hive/metastore/api/DateColumnStatsData.java |    823 -
 .../hadoop/hive/metastore/api/Decimal.java      |    497 -
 .../metastore/api/DecimalColumnStatsData.java   |    823 -
 .../api/DefaultConstraintsRequest.java          |    591 -
 .../api/DefaultConstraintsResponse.java         |    443 -
 .../metastore/api/DoubleColumnStatsData.java    |    799 -
 .../hive/metastore/api/DropCatalogRequest.java  |    395 -
 .../metastore/api/DropConstraintRequest.java    |    701 -
 .../hive/metastore/api/DropPartitionsExpr.java  |    505 -
 .../metastore/api/DropPartitionsRequest.java    |   1218 -
 .../metastore/api/DropPartitionsResult.java     |    447 -
 .../hive/metastore/api/EnvironmentContext.java  |    447 -
 .../hive/metastore/api/EventRequestType.java    |     48 -
 .../hadoop/hive/metastore/api/FieldSchema.java  |    603 -
 .../metastore/api/FileMetadataExprType.java     |     42 -
 .../metastore/api/FindSchemasByColsResp.java    |    449 -
 .../metastore/api/FindSchemasByColsRqst.java    |    605 -
 .../hive/metastore/api/FireEventRequest.java    |    967 -
 .../metastore/api/FireEventRequestData.java     |    309 -
 .../hive/metastore/api/FireEventResponse.java   |    283 -
 .../hive/metastore/api/ForeignKeysRequest.java  |    814 -
 .../hive/metastore/api/ForeignKeysResponse.java |    443 -
 .../hadoop/hive/metastore/api/Function.java     |   1306 -
 .../hadoop/hive/metastore/api/FunctionType.java |     42 -
 .../metastore/api/GetAllFunctionsResponse.java  |    447 -
 .../hive/metastore/api/GetCatalogRequest.java   |    395 -
 .../hive/metastore/api/GetCatalogResponse.java  |    400 -
 .../hive/metastore/api/GetCatalogsResponse.java |    444 -
 .../api/GetFileMetadataByExprRequest.java       |    773 -
 .../api/GetFileMetadataByExprResult.java        |    553 -
 .../metastore/api/GetFileMetadataRequest.java   |    438 -
 .../metastore/api/GetFileMetadataResult.java    |    540 -
 .../metastore/api/GetOpenTxnsInfoResponse.java  |    542 -
 .../hive/metastore/api/GetOpenTxnsResponse.java |    750 -
 .../api/GetPrincipalsInRoleRequest.java         |    389 -
 .../api/GetPrincipalsInRoleResponse.java        |    443 -
 .../api/GetRoleGrantsForPrincipalRequest.java   |    502 -
 .../api/GetRoleGrantsForPrincipalResponse.java  |    443 -
 .../metastore/api/GetRuntimeStatsRequest.java   |    482 -
 .../hive/metastore/api/GetSerdeRequest.java     |    395 -
 .../hive/metastore/api/GetTableRequest.java     |    922 -
 .../hive/metastore/api/GetTableResult.java      |    501 -
 .../hive/metastore/api/GetTablesRequest.java    |    765 -
 .../hive/metastore/api/GetTablesResult.java     |    443 -
 .../metastore/api/GetValidWriteIdsRequest.java  |    539 -
 .../metastore/api/GetValidWriteIdsResponse.java |    443 -
 .../api/GrantRevokePrivilegeRequest.java        |    620 -
 .../api/GrantRevokePrivilegeResponse.java       |    390 -
 .../metastore/api/GrantRevokeRoleRequest.java   |   1059 -
 .../metastore/api/GrantRevokeRoleResponse.java  |    390 -
 .../hive/metastore/api/GrantRevokeType.java     |     45 -
 .../hive/metastore/api/HeartbeatRequest.java    |    489 -
 .../metastore/api/HeartbeatTxnRangeRequest.java |    482 -
 .../api/HeartbeatTxnRangeResponse.java          |    588 -
 .../hive/metastore/api/HiveObjectPrivilege.java |    833 -
 .../hive/metastore/api/HiveObjectRef.java       |    979 -
 .../hive/metastore/api/HiveObjectType.java      |     54 -
 .../hadoop/hive/metastore/api/ISchema.java      |   1266 -
 .../hadoop/hive/metastore/api/ISchemaName.java  |    603 -
 .../metastore/api/InsertEventRequestData.java   |    855 -
 .../metastore/api/InvalidInputException.java    |    395 -
 .../metastore/api/InvalidObjectException.java   |    395 -
 .../api/InvalidOperationException.java          |    395 -
 .../api/InvalidPartitionException.java          |    395 -
 .../hive/metastore/api/LockComponent.java       |   1158 -
 .../hadoop/hive/metastore/api/LockLevel.java    |     48 -
 .../hadoop/hive/metastore/api/LockRequest.java  |    861 -
 .../hadoop/hive/metastore/api/LockResponse.java |    500 -
 .../hadoop/hive/metastore/api/LockState.java    |     51 -
 .../hadoop/hive/metastore/api/LockType.java     |     48 -
 .../hive/metastore/api/LongColumnStatsData.java |    799 -
 .../api/MapSchemaVersionToSerdeRequest.java     |    504 -
 .../hive/metastore/api/Materialization.java     |    750 -
 .../hive/metastore/api/MetaException.java       |    395 -
 .../hive/metastore/api/MetadataPpdResult.java   |    517 -
 .../hive/metastore/api/NoSuchLockException.java |    395 -
 .../metastore/api/NoSuchObjectException.java    |    395 -
 .../hive/metastore/api/NoSuchTxnException.java  |    395 -
 .../api/NotNullConstraintsRequest.java          |    591 -
 .../api/NotNullConstraintsResponse.java         |    443 -
 .../hive/metastore/api/NotificationEvent.java   |   1112 -
 .../metastore/api/NotificationEventRequest.java |    490 -
 .../api/NotificationEventResponse.java          |    443 -
 .../api/NotificationEventsCountRequest.java     |    598 -
 .../api/NotificationEventsCountResponse.java    |    387 -
 .../hive/metastore/api/OpenTxnRequest.java      |    963 -
 .../hive/metastore/api/OpenTxnsResponse.java    |    438 -
 .../apache/hadoop/hive/metastore/api/Order.java |    497 -
 .../hadoop/hive/metastore/api/Partition.java    |   1535 -
 .../hive/metastore/api/PartitionEventType.java  |     42 -
 .../api/PartitionListComposingSpec.java         |    449 -
 .../hive/metastore/api/PartitionSpec.java       |   1136 -
 .../api/PartitionSpecWithSharedSD.java          |    558 -
 .../metastore/api/PartitionValuesRequest.java   |   1328 -
 .../metastore/api/PartitionValuesResponse.java  |    443 -
 .../hive/metastore/api/PartitionValuesRow.java  |    438 -
 .../hive/metastore/api/PartitionWithoutSD.java  |   1016 -
 .../metastore/api/PartitionsByExprRequest.java  |    921 -
 .../metastore/api/PartitionsByExprResult.java   |    542 -
 .../metastore/api/PartitionsStatsRequest.java   |   1111 -
 .../metastore/api/PartitionsStatsResult.java    |    597 -
 .../hive/metastore/api/PrimaryKeysRequest.java  |    600 -
 .../hive/metastore/api/PrimaryKeysResponse.java |    443 -
 .../metastore/api/PrincipalPrivilegeSet.java    |    906 -
 .../hive/metastore/api/PrincipalType.java       |     48 -
 .../hadoop/hive/metastore/api/PrivilegeBag.java |    449 -
 .../hive/metastore/api/PrivilegeGrantInfo.java  |    815 -
 .../metastore/api/PutFileMetadataRequest.java   |    710 -
 .../metastore/api/PutFileMetadataResult.java    |    283 -
 .../api/ReplTblWriteIdStateRequest.java         |    952 -
 .../hive/metastore/api/RequestPartsSpec.java    |    438 -
 .../hadoop/hive/metastore/api/ResourceType.java |     48 -
 .../hadoop/hive/metastore/api/ResourceUri.java  |    511 -
 .../apache/hadoop/hive/metastore/api/Role.java  |    601 -
 .../hive/metastore/api/RolePrincipalGrant.java  |   1035 -
 .../hadoop/hive/metastore/api/RuntimeStat.java  |    600 -
 .../hive/metastore/api/SQLCheckConstraint.java  |   1213 -
 .../metastore/api/SQLDefaultConstraint.java     |   1213 -
 .../hive/metastore/api/SQLForeignKey.java       |   1822 -
 .../metastore/api/SQLNotNullConstraint.java     |   1109 -
 .../hive/metastore/api/SQLPrimaryKey.java       |   1210 -
 .../hive/metastore/api/SQLUniqueConstraint.java |   1207 -
 .../hadoop/hive/metastore/api/Schema.java       |    605 -
 .../hive/metastore/api/SchemaCompatibility.java |     51 -
 .../hadoop/hive/metastore/api/SchemaType.java   |     45 -
 .../hive/metastore/api/SchemaValidation.java    |     45 -
 .../hive/metastore/api/SchemaVersion.java       |   1412 -
 .../metastore/api/SchemaVersionDescriptor.java  |    502 -
 .../hive/metastore/api/SchemaVersionState.java  |     63 -
 .../hadoop/hive/metastore/api/SerDeInfo.java    |   1092 -
 .../hadoop/hive/metastore/api/SerdeType.java    |     45 -
 .../api/SetPartitionsStatsRequest.java          |    858 -
 .../api/SetSchemaVersionStateRequest.java       |    516 -
 .../hive/metastore/api/ShowCompactRequest.java  |    283 -
 .../hive/metastore/api/ShowCompactResponse.java |    443 -
 .../api/ShowCompactResponseElement.java         |   1641 -
 .../hive/metastore/api/ShowLocksRequest.java    |    710 -
 .../hive/metastore/api/ShowLocksResponse.java   |    449 -
 .../metastore/api/ShowLocksResponseElement.java |   1929 -
 .../hadoop/hive/metastore/api/SkewedInfo.java   |    834 -
 .../hive/metastore/api/StorageDescriptor.java   |   1748 -
 .../metastore/api/StringColumnStatsData.java    |    791 -
 .../apache/hadoop/hive/metastore/api/Table.java |   2483 -
 .../hadoop/hive/metastore/api/TableMeta.java    |    807 -
 .../hive/metastore/api/TableStatsRequest.java   |    961 -
 .../hive/metastore/api/TableStatsResult.java    |    550 -
 .../hive/metastore/api/TableValidWriteIds.java  |    851 -
 .../hive/metastore/api/ThriftHiveMetastore.java | 240381 ----------------
 .../hive/metastore/api/TxnAbortedException.java |    395 -
 .../hadoop/hive/metastore/api/TxnInfo.java      |   1220 -
 .../hive/metastore/api/TxnOpenException.java    |    395 -
 .../hadoop/hive/metastore/api/TxnState.java     |     48 -
 .../hadoop/hive/metastore/api/TxnToWriteId.java |    482 -
 .../apache/hadoop/hive/metastore/api/Type.java  |    768 -
 .../metastore/api/UniqueConstraintsRequest.java |    591 -
 .../api/UniqueConstraintsResponse.java          |    443 -
 .../hive/metastore/api/UnknownDBException.java  |    395 -
 .../api/UnknownPartitionException.java          |    395 -
 .../metastore/api/UnknownTableException.java    |    395 -
 .../hive/metastore/api/UnlockRequest.java       |    387 -
 .../hadoop/hive/metastore/api/Version.java      |    499 -
 .../hive/metastore/api/WMAlterPoolRequest.java  |    504 -
 .../hive/metastore/api/WMAlterPoolResponse.java |    283 -
 .../api/WMAlterResourcePlanRequest.java         |    805 -
 .../api/WMAlterResourcePlanResponse.java        |    398 -
 .../metastore/api/WMAlterTriggerRequest.java    |    398 -
 .../metastore/api/WMAlterTriggerResponse.java   |    283 -
 ...CreateOrDropTriggerToPoolMappingRequest.java |    708 -
 ...reateOrDropTriggerToPoolMappingResponse.java |    283 -
 .../api/WMCreateOrUpdateMappingRequest.java     |    501 -
 .../api/WMCreateOrUpdateMappingResponse.java    |    283 -
 .../hive/metastore/api/WMCreatePoolRequest.java |    398 -
 .../metastore/api/WMCreatePoolResponse.java     |    283 -
 .../api/WMCreateResourcePlanRequest.java        |    504 -
 .../api/WMCreateResourcePlanResponse.java       |    283 -
 .../metastore/api/WMCreateTriggerRequest.java   |    398 -
 .../metastore/api/WMCreateTriggerResponse.java  |    283 -
 .../metastore/api/WMDropMappingRequest.java     |    398 -
 .../metastore/api/WMDropMappingResponse.java    |    283 -
 .../hive/metastore/api/WMDropPoolRequest.java   |    499 -
 .../hive/metastore/api/WMDropPoolResponse.java  |    283 -
 .../api/WMDropResourcePlanRequest.java          |    393 -
 .../api/WMDropResourcePlanResponse.java         |    283 -
 .../metastore/api/WMDropTriggerRequest.java     |    499 -
 .../metastore/api/WMDropTriggerResponse.java    |    283 -
 .../hive/metastore/api/WMFullResourcePlan.java  |   1033 -
 .../api/WMGetActiveResourcePlanRequest.java     |    283 -
 .../api/WMGetActiveResourcePlanResponse.java    |    398 -
 .../api/WMGetAllResourcePlanRequest.java        |    283 -
 .../api/WMGetAllResourcePlanResponse.java       |    447 -
 .../metastore/api/WMGetResourcePlanRequest.java |    393 -
 .../api/WMGetResourcePlanResponse.java          |    398 -
 .../api/WMGetTriggersForResourePlanRequest.java |    393 -
 .../WMGetTriggersForResourePlanResponse.java    |    447 -
 .../hadoop/hive/metastore/api/WMMapping.java    |    804 -
 .../hive/metastore/api/WMNullablePool.java      |    901 -
 .../metastore/api/WMNullableResourcePlan.java   |    918 -
 .../hadoop/hive/metastore/api/WMPool.java       |    802 -
 .../metastore/api/WMPoolSchedulingPolicy.java   |     45 -
 .../hive/metastore/api/WMPoolTrigger.java       |    490 -
 .../hive/metastore/api/WMResourcePlan.java      |    720 -
 .../metastore/api/WMResourcePlanStatus.java     |     48 -
 .../hadoop/hive/metastore/api/WMTrigger.java    |    809 -
 .../api/WMValidateResourcePlanRequest.java      |    393 -
 .../api/WMValidateResourcePlanResponse.java     |    597 -
 .../hive/metastore/api/WriteEventInfo.java      |   1012 -
 .../api/WriteNotificationLogRequest.java        |    949 -
 .../api/WriteNotificationLogResponse.java       |    283 -
 .../metastore/api/hive_metastoreConstants.java  |     89 -
 .../gen-php/metastore/ThriftHiveMetastore.php   |  60225 ----
 .../src/gen/thrift/gen-php/metastore/Types.php  |  33017 ---
 .../src/gen/thrift/gen-py/__init__.py           |      0
 .../hive_metastore/ThriftHiveMetastore-remote   |   1641 -
 .../hive_metastore/ThriftHiveMetastore.py       |  49183 ----
 .../thrift/gen-py/hive_metastore/__init__.py    |      1 -
 .../thrift/gen-py/hive_metastore/constants.py   |     36 -
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  23608 --
 .../thrift/gen-rb/hive_metastore_constants.rb   |     59 -
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   5419 -
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |  13845 -
 .../hadoop/hive/common/StatsSetupConst.java     |    339 -
 .../common/classification/RetrySemantics.java   |     57 -
 .../common/ndv/NumDistinctValueEstimator.java   |     51 -
 .../ndv/NumDistinctValueEstimatorFactory.java   |     75 -
 .../hadoop/hive/common/ndv/fm/FMSketch.java     |    359 -
 .../hive/common/ndv/fm/FMSketchUtils.java       |    132 -
 .../hive/common/ndv/hll/HLLConstants.java       |    933 -
 .../hive/common/ndv/hll/HLLDenseRegister.java   |    202 -
 .../hadoop/hive/common/ndv/hll/HLLRegister.java |     50 -
 .../hive/common/ndv/hll/HLLSparseRegister.java  |    261 -
 .../hadoop/hive/common/ndv/hll/HyperLogLog.java |    664 -
 .../hive/common/ndv/hll/HyperLogLogUtils.java   |    409 -
 .../hive/metastore/AcidEventListener.java       |    146 -
 .../hive/metastore/AggregateStatsCache.java     |    571 -
 .../hadoop/hive/metastore/AlterHandler.java     |    203 -
 .../apache/hadoop/hive/metastore/Batchable.java |     86 -
 .../hadoop/hive/metastore/ColumnType.java       |    301 -
 .../hadoop/hive/metastore/DatabaseProduct.java  |     75 -
 .../apache/hadoop/hive/metastore/Deadline.java  |    172 -
 .../hive/metastore/DeadlineException.java       |     29 -
 .../hive/metastore/DefaultHiveMetaHook.java     |     51 -
 .../DefaultMetaStoreFilterHookImpl.java         |     93 -
 .../DefaultPartitionExpressionProxy.java        |     57 -
 .../metastore/DefaultStorageSchemaReader.java   |     38 -
 .../hadoop/hive/metastore/FileFormatProxy.java  |     64 -
 .../hive/metastore/FileMetadataHandler.java     |    109 -
 .../hive/metastore/FileMetadataManager.java     |    124 -
 .../hive/metastore/HMSMetricsListener.java      |     90 -
 .../hadoop/hive/metastore/HiveAlterHandler.java |    974 -
 .../hive/metastore/HiveMetaException.java       |     42 -
 .../hadoop/hive/metastore/HiveMetaHook.java     |    122 -
 .../hive/metastore/HiveMetaHookLoader.java      |     39 -
 .../hadoop/hive/metastore/HiveMetaStore.java    |   9422 -
 .../hive/metastore/HiveMetaStoreClient.java     |   3435 -
 .../hive/metastore/HiveMetaStoreFsImpl.java     |     55 -
 .../hive/metastore/IExtrapolatePartStatus.java  |     85 -
 .../hadoop/hive/metastore/IHMSHandler.java      |    109 -
 .../hadoop/hive/metastore/IMetaStoreClient.java |   3740 -
 .../hive/metastore/IMetaStoreSchemaInfo.java    |    115 -
 .../metastore/LinearExtrapolatePartStatus.java  |    106 -
 .../hive/metastore/LockComponentBuilder.java    |    121 -
 .../hive/metastore/LockRequestBuilder.java      |    168 -
 .../MaterializationsCacheCleanerTask.java       |     63 -
 .../MaterializationsInvalidationCache.java      |    543 -
 .../MaterializationsRebuildLockCleanerTask.java |     61 -
 .../MaterializationsRebuildLockHandler.java     |    216 -
 .../hive/metastore/MetaStoreDirectSql.java      |   2817 -
 .../metastore/MetaStoreEndFunctionContext.java  |     59 -
 .../metastore/MetaStoreEndFunctionListener.java |     58 -
 .../hive/metastore/MetaStoreEventListener.java  |    306 -
 .../MetaStoreEventListenerConstants.java        |     41 -
 .../hadoop/hive/metastore/MetaStoreFS.java      |     43 -
 .../hive/metastore/MetaStoreFilterHook.java     |    147 -
 .../hadoop/hive/metastore/MetaStoreInit.java    |    109 -
 .../hive/metastore/MetaStoreInitContext.java    |     27 -
 .../hive/metastore/MetaStoreInitListener.java   |     49 -
 .../metastore/MetaStoreListenerNotifier.java    |    375 -
 .../metastore/MetaStorePreEventListener.java    |     57 -
 .../hive/metastore/MetaStoreSchemaInfo.java     |    246 -
 .../metastore/MetaStoreSchemaInfoFactory.java   |     64 -
 .../hadoop/hive/metastore/MetaStoreThread.java  |     58 -
 .../hadoop/hive/metastore/MetadataStore.java    |     52 -
 .../hive/metastore/MetastoreTaskThread.java     |     38 -
 .../hadoop/hive/metastore/ObjectStore.java      |  12377 -
 .../hive/metastore/PartFilterExprUtil.java      |    165 -
 .../hive/metastore/PartitionDropOptions.java    |     54 -
 .../metastore/PartitionExpressionProxy.java     |     73 -
 .../apache/hadoop/hive/metastore/RawStore.java  |   1718 -
 .../hadoop/hive/metastore/RawStoreProxy.java    |    114 -
 .../hive/metastore/ReplChangeManager.java       |    501 -
 .../hive/metastore/RetryingHMSHandler.java      |    232 -
 .../hive/metastore/RetryingMetaStoreClient.java |    341 -
 .../hive/metastore/RuntimeStatsCleanerTask.java |     66 -
 .../metastore/SessionPropertiesListener.java    |     46 -
 .../hive/metastore/StatObjectConverter.java     |    892 -
 .../hive/metastore/StorageSchemaReader.java     |     46 -
 .../hive/metastore/TServerSocketKeepAlive.java  |     47 -
 .../hive/metastore/TSetIpAddressProcessor.java  |     62 -
 .../hive/metastore/TUGIBasedProcessor.java      |    183 -
 .../apache/hadoop/hive/metastore/TableType.java |     26 -
 .../hadoop/hive/metastore/ThreadPool.java       |     63 -
 .../TransactionalMetaStoreEventListener.java    |     39 -
 .../TransactionalValidationListener.java        |    489 -
 .../apache/hadoop/hive/metastore/Warehouse.java |    756 -
 .../annotation/MetastoreVersionAnnotation.java  |     85 -
 .../hive/metastore/annotation/NoReconnect.java  |     29 -
 .../api/InitializeTableWriteIdsRequest.java     |     42 -
 .../hive/metastore/api/utils/DecimalUtils.java  |     49 -
 .../hive/metastore/cache/ByteArrayWrapper.java  |     45 -
 .../hadoop/hive/metastore/cache/CacheUtils.java |    136 -
 .../hive/metastore/cache/CachedStore.java       |   2530 -
 .../hive/metastore/cache/SharedCache.java       |   1650 -
 .../client/builder/CatalogBuilder.java          |     62 -
 .../client/builder/ConstraintBuilder.java       |    115 -
 .../client/builder/DatabaseBuilder.java         |    122 -
 .../client/builder/FunctionBuilder.java         |    143 -
 .../GrantRevokePrivilegeRequestBuilder.java     |     63 -
 .../builder/HiveObjectPrivilegeBuilder.java     |     69 -
 .../client/builder/HiveObjectRefBuilder.java    |     69 -
 .../client/builder/ISchemaBuilder.java          |    102 -
 .../client/builder/PartitionBuilder.java        |    119 -
 .../builder/PrivilegeGrantInfoBuilder.java      |     84 -
 .../metastore/client/builder/RoleBuilder.java   |     55 -
 .../builder/SQLCheckConstraintBuilder.java      |     51 -
 .../builder/SQLDefaultConstraintBuilder.java    |     51 -
 .../client/builder/SQLForeignKeyBuilder.java    |    103 -
 .../builder/SQLNotNullConstraintBuilder.java    |     52 -
 .../client/builder/SQLPrimaryKeyBuilder.java    |     52 -
 .../builder/SQLUniqueConstraintBuilder.java     |     46 -
 .../client/builder/SchemaVersionBuilder.java    |    114 -
 .../client/builder/SerdeAndColsBuilder.java     |    124 -
 .../builder/StorageDescriptorBuilder.java       |    163 -
 .../metastore/client/builder/TableBuilder.java  |    224 -
 .../aggr/BinaryColumnStatsAggregator.java       |     61 -
 .../aggr/BooleanColumnStatsAggregator.java      |     62 -
 .../columnstats/aggr/ColumnStatsAggregator.java |     35 -
 .../aggr/ColumnStatsAggregatorFactory.java      |    113 -
 .../aggr/DateColumnStatsAggregator.java         |    360 -
 .../aggr/DecimalColumnStatsAggregator.java      |    375 -
 .../aggr/DoubleColumnStatsAggregator.java       |    348 -
 .../aggr/IExtrapolatePartStatus.java            |     47 -
 .../aggr/LongColumnStatsAggregator.java         |    348 -
 .../aggr/StringColumnStatsAggregator.java       |    304 -
 .../cache/DateColumnStatsDataInspector.java     |    124 -
 .../cache/DecimalColumnStatsDataInspector.java  |    124 -
 .../cache/DoubleColumnStatsDataInspector.java   |    124 -
 .../cache/LongColumnStatsDataInspector.java     |    124 -
 .../cache/StringColumnStatsDataInspector.java   |    125 -
 .../merge/BinaryColumnStatsMerger.java          |     35 -
 .../merge/BooleanColumnStatsMerger.java         |     35 -
 .../columnstats/merge/ColumnStatsMerger.java    |     31 -
 .../merge/ColumnStatsMergerFactory.java         |    120 -
 .../merge/DateColumnStatsMerger.java            |     59 -
 .../merge/DecimalColumnStatsMerger.java         |     85 -
 .../merge/DoubleColumnStatsMerger.java          |     54 -
 .../merge/LongColumnStatsMerger.java            |     54 -
 .../merge/StringColumnStatsMerger.java          |     54 -
 .../metastore/conf/ConfTemplatePrinter.java     |    150 -
 .../hive/metastore/conf/EnumValidator.java      |     26 -
 .../hive/metastore/conf/MetastoreConf.java      |   1688 -
 .../hive/metastore/conf/RangeValidator.java     |     38 -
 .../hive/metastore/conf/SizeValidator.java      |    110 -
 .../hive/metastore/conf/StringSetValidator.java |     51 -
 .../hive/metastore/conf/TimeValidator.java      |     67 -
 .../hadoop/hive/metastore/conf/Validator.java   |     87 -
 .../datasource/BoneCPDataSourceProvider.java    |     87 -
 .../datasource/DataSourceProvider.java          |     79 -
 .../datasource/DataSourceProviderFactory.java   |     66 -
 .../datasource/DbCPDataSourceProvider.java      |    117 -
 .../datasource/HikariCPDataSourceProvider.java  |     89 -
 .../hive/metastore/datasource/package-info.java |     23 -
 .../hive/metastore/events/AbortTxnEvent.java    |     51 -
 .../hive/metastore/events/AcidWriteEvent.java   |     91 -
 .../metastore/events/AddForeignKeyEvent.java    |     41 -
 .../events/AddNotNullConstraintEvent.java       |     42 -
 .../metastore/events/AddPartitionEvent.java     |     84 -
 .../metastore/events/AddPrimaryKeyEvent.java    |     42 -
 .../metastore/events/AddSchemaVersionEvent.java |     40 -
 .../events/AddUniqueConstraintEvent.java        |     42 -
 .../metastore/events/AllocWriteIdEvent.java     |     57 -
 .../metastore/events/AlterCatalogEvent.java     |     44 -
 .../metastore/events/AlterDatabaseEvent.java    |     56 -
 .../metastore/events/AlterISchemaEvent.java     |     45 -
 .../metastore/events/AlterPartitionEvent.java   |     75 -
 .../events/AlterSchemaVersionEvent.java         |     46 -
 .../hive/metastore/events/AlterTableEvent.java  |     63 -
 .../hive/metastore/events/CommitTxnEvent.java   |     51 -
 .../metastore/events/ConfigChangeEvent.java     |     52 -
 .../metastore/events/CreateCatalogEvent.java    |     39 -
 .../metastore/events/CreateDatabaseEvent.java   |     43 -
 .../metastore/events/CreateFunctionEvent.java   |     43 -
 .../metastore/events/CreateISchemaEvent.java    |     39 -
 .../hive/metastore/events/CreateTableEvent.java |     43 -
 .../hive/metastore/events/DropCatalogEvent.java |     39 -
 .../metastore/events/DropConstraintEvent.java   |     57 -
 .../metastore/events/DropDatabaseEvent.java     |     43 -
 .../metastore/events/DropFunctionEvent.java     |     43 -
 .../hive/metastore/events/DropISchemaEvent.java |     39 -
 .../metastore/events/DropPartitionEvent.java    |     70 -
 .../events/DropSchemaVersionEvent.java          |     40 -
 .../hive/metastore/events/DropTableEvent.java   |     54 -
 .../hive/metastore/events/EventCleanerTask.java |     66 -
 .../hive/metastore/events/InsertEvent.java      |    132 -
 .../hive/metastore/events/ListenerEvent.java    |    187 -
 .../events/LoadPartitionDoneEvent.java          |     57 -
 .../hive/metastore/events/OpenTxnEvent.java     |     51 -
 .../metastore/events/PreAddPartitionEvent.java  |     79 -
 .../events/PreAddSchemaVersionEvent.java        |     39 -
 .../metastore/events/PreAlterCatalogEvent.java  |     40 -
 .../metastore/events/PreAlterDatabaseEvent.java |     47 -
 .../metastore/events/PreAlterISchemaEvent.java  |     44 -
 .../events/PreAlterPartitionEvent.java          |     65 -
 .../events/PreAlterSchemaVersionEvent.java      |     45 -
 .../metastore/events/PreAlterTableEvent.java    |     53 -
 .../events/PreAuthorizationCallEvent.java       |     33 -
 .../metastore/events/PreCreateCatalogEvent.java |     39 -
 .../events/PreCreateDatabaseEvent.java          |     43 -
 .../metastore/events/PreCreateISchemaEvent.java |     39 -
 .../metastore/events/PreCreateTableEvent.java   |     43 -
 .../metastore/events/PreDropCatalogEvent.java   |     39 -
 .../metastore/events/PreDropDatabaseEvent.java  |     43 -
 .../metastore/events/PreDropISchemaEvent.java   |     39 -
 .../metastore/events/PreDropPartitionEvent.java |     67 -
 .../events/PreDropSchemaVersionEvent.java       |     39 -
 .../metastore/events/PreDropTableEvent.java     |     55 -
 .../hive/metastore/events/PreEventContext.java  |     82 -
 .../events/PreLoadPartitionDoneEvent.java       |     64 -
 .../metastore/events/PreReadCatalogEvent.java   |     39 -
 .../metastore/events/PreReadDatabaseEvent.java  |     46 -
 .../metastore/events/PreReadISchemaEvent.java   |     39 -
 .../metastore/events/PreReadTableEvent.java     |     47 -
 .../events/PreReadhSchemaVersionEvent.java      |     36 -
 .../metastore/hooks/JDOConnectionURLHook.java   |     52 -
 .../hive/metastore/hooks/URIResolverHook.java   |     37 -
 .../metastore/messaging/AbortTxnMessage.java    |     36 -
 .../metastore/messaging/AcidWriteMessage.java   |     50 -
 .../messaging/AddForeignKeyMessage.java         |     36 -
 .../messaging/AddNotNullConstraintMessage.java  |     36 -
 .../messaging/AddPartitionMessage.java          |     68 -
 .../messaging/AddPrimaryKeyMessage.java         |     35 -
 .../messaging/AddUniqueConstraintMessage.java   |     36 -
 .../messaging/AllocWriteIdMessage.java          |     36 -
 .../messaging/AlterCatalogMessage.java          |     29 -
 .../messaging/AlterDatabaseMessage.java         |     36 -
 .../messaging/AlterPartitionMessage.java        |     69 -
 .../metastore/messaging/AlterTableMessage.java  |     58 -
 .../metastore/messaging/CommitTxnMessage.java   |     59 -
 .../messaging/CreateCatalogMessage.java         |     25 -
 .../messaging/CreateDatabaseMessage.java        |     31 -
 .../messaging/CreateFunctionMessage.java        |     46 -
 .../metastore/messaging/CreateTableMessage.java |     53 -
 .../metastore/messaging/DropCatalogMessage.java |     25 -
 .../messaging/DropConstraintMessage.java        |     29 -
 .../messaging/DropDatabaseMessage.java          |     27 -
 .../messaging/DropFunctionMessage.java          |     38 -
 .../messaging/DropPartitionMessage.java         |     49 -
 .../metastore/messaging/DropTableMessage.java   |     46 -
 .../hive/metastore/messaging/EventMessage.java  |    127 -
 .../hive/metastore/messaging/EventUtils.java    |    202 -
 .../hive/metastore/messaging/InsertMessage.java |     75 -
 .../messaging/MessageDeserializer.java          |    200 -
 .../metastore/messaging/MessageFactory.java     |    341 -
 .../metastore/messaging/OpenTxnMessage.java     |     38 -
 .../metastore/messaging/PartitionFiles.java     |     53 -
 .../messaging/event/filters/AndFilter.java      |     39 -
 .../messaging/event/filters/BasicFilter.java    |     33 -
 .../event/filters/DatabaseAndTableFilter.java   |     65 -
 .../event/filters/EventBoundaryFilter.java      |     34 -
 .../event/filters/MessageFormatFilter.java      |     36 -
 .../messaging/json/JSONAbortTxnMessage.java     |     88 -
 .../messaging/json/JSONAcidWriteMessage.java    |    150 -
 .../json/JSONAddForeignKeyMessage.java          |    102 -
 .../json/JSONAddNotNullConstraintMessage.java   |     97 -
 .../messaging/json/JSONAddPartitionMessage.java |    175 -
 .../json/JSONAddPrimaryKeyMessage.java          |    102 -
 .../json/JSONAddUniqueConstraintMessage.java    |     99 -
 .../messaging/json/JSONAllocWriteIdMessage.java |    113 -
 .../messaging/json/JSONAlterCatalogMessage.java |     90 -
 .../json/JSONAlterDatabaseMessage.java          |     97 -
 .../json/JSONAlterPartitionMessage.java         |    153 -
 .../messaging/json/JSONAlterTableMessage.java   |    128 -
 .../messaging/json/JSONCommitTxnMessage.java    |    183 -
 .../json/JSONCreateCatalogMessage.java          |     80 -
 .../json/JSONCreateDatabaseMessage.java         |     85 -
 .../json/JSONCreateFunctionMessage.java         |     87 -
 .../messaging/json/JSONCreateTableMessage.java  |    134 -
 .../messaging/json/JSONDropCatalogMessage.java  |     67 -
 .../json/JSONDropConstraintMessage.java         |     91 -
 .../messaging/json/JSONDropDatabaseMessage.java |     72 -
 .../messaging/json/JSONDropFunctionMessage.java |     79 -
 .../json/JSONDropPartitionMessage.java          |    135 -
 .../messaging/json/JSONDropTableMessage.java    |    121 -
 .../messaging/json/JSONInsertMessage.java       |    148 -
 .../messaging/json/JSONMessageDeserializer.java |    273 -
 .../messaging/json/JSONMessageFactory.java      |    402 -
 .../messaging/json/JSONOpenTxnMessage.java      |    106 -
 .../hive/metastore/metrics/JsonReporter.java    |    223 -
 .../hive/metastore/metrics/JvmPauseMonitor.java |    222 -
 .../hadoop/hive/metastore/metrics/Metrics.java  |    244 -
 .../metastore/metrics/MetricsConstants.java     |     46 -
 .../hive/metastore/metrics/PerfLogger.java      |    194 -
 .../hadoop/hive/metastore/model/MCatalog.java   |     58 -
 .../hive/metastore/model/MColumnDescriptor.java |     51 -
 .../hive/metastore/model/MConstraint.java       |    214 -
 .../hive/metastore/model/MCreationMetadata.java |     87 -
 .../hive/metastore/model/MDBPrivilege.java      |    142 -
 .../hadoop/hive/metastore/model/MDatabase.java  |    157 -
 .../hive/metastore/model/MDelegationToken.java  |     45 -
 .../hive/metastore/model/MFieldSchema.java      |     80 -
 .../hadoop/hive/metastore/model/MFunction.java  |    119 -
 .../hive/metastore/model/MGlobalPrivilege.java  |    130 -
 .../hadoop/hive/metastore/model/MISchema.java   |    107 -
 .../hadoop/hive/metastore/model/MIndex.java     |    200 -
 .../hadoop/hive/metastore/model/MMasterKey.java |     55 -
 .../metastore/model/MMetastoreDBProperties.java |     56 -
 .../hive/metastore/model/MNotificationLog.java  |    108 -
 .../metastore/model/MNotificationNextId.java    |     42 -
 .../hadoop/hive/metastore/model/MOrder.java     |     62 -
 .../hadoop/hive/metastore/model/MPartition.java |    162 -
 .../model/MPartitionColumnPrivilege.java        |    171 -
 .../model/MPartitionColumnStatistics.java       |    281 -
 .../hive/metastore/model/MPartitionEvent.java   |     97 -
 .../metastore/model/MPartitionPrivilege.java    |    149 -
 .../hive/metastore/model/MPrincipalDesc.java    |     59 -
 .../hive/metastore/model/MResourceUri.java      |     49 -
 .../hadoop/hive/metastore/model/MRole.java      |     80 -
 .../hadoop/hive/metastore/model/MRoleMap.java   |    120 -
 .../hive/metastore/model/MRuntimeStat.java      |     59 -
 .../hive/metastore/model/MSchemaVersion.java    |    127 -
 .../hadoop/hive/metastore/model/MSerDeInfo.java |    127 -
 .../metastore/model/MStorageDescriptor.java     |    277 -
 .../hive/metastore/model/MStringList.java       |     62 -
 .../hadoop/hive/metastore/model/MTable.java     |    283 -
 .../metastore/model/MTableColumnPrivilege.java  |    170 -
 .../metastore/model/MTableColumnStatistics.java |    272 -
 .../hive/metastore/model/MTablePrivilege.java   |    149 -
 .../model/MTxnWriteNotificationLog.java         |    123 -
 .../hadoop/hive/metastore/model/MType.java      |    105 -
 .../hive/metastore/model/MVersionTable.java     |     57 -
 .../hadoop/hive/metastore/model/MWMMapping.java |     83 -
 .../hadoop/hive/metastore/model/MWMPool.java    |     89 -
 .../hive/metastore/model/MWMResourcePlan.java   |    105 -
 .../hadoop/hive/metastore/model/MWMTrigger.java |     89 -
 .../hive/metastore/parser/ExpressionTree.java   |    606 -
 .../hadoop/hive/metastore/parser/Filter.g       |    486 -
 .../hive/metastore/parser/package-info.java     |     23 -
 .../spec/CompositePartitionSpecProxy.java       |    258 -
 .../spec/PartitionListComposingSpecProxy.java   |    209 -
 .../partition/spec/PartitionSpecProxy.java      |    220 -
 .../spec/PartitionSpecWithSharedSDProxy.java    |    192 -
 .../hive/metastore/security/DBTokenStore.java   |    180 -
 .../security/DelegationTokenIdentifier.java     |     52 -
 .../security/DelegationTokenSecretManager.java  |    134 -
 .../security/DelegationTokenSelector.java       |     33 -
 .../security/DelegationTokenStore.java          |    116 -
 .../metastore/security/DelegationTokenTool.java |    252 -
 .../security/HadoopThriftAuthBridge.java        |    700 -
 .../security/HadoopThriftAuthBridge23.java      |    114 -
 .../metastore/security/MemoryTokenStore.java    |    118 -
 .../MetastoreDelegationTokenManager.java        |    180 -
 .../metastore/security/TFilterTransport.java    |     99 -
 .../security/TUGIAssumingTransport.java         |     73 -
 .../security/TUGIContainingTransport.java       |     96 -
 .../TokenStoreDelegationTokenSecretManager.java |    334 -
 .../metastore/security/ZooKeeperTokenStore.java |    474 -
 .../hive/metastore/tools/HiveMetaTool.java      |    490 -
 .../hive/metastore/tools/HiveSchemaHelper.java  |    673 -
 .../metastore/tools/MetastoreSchemaTool.java    |    460 -
 .../hive/metastore/tools/SQLGenerator.java      |    187 -
 .../metastore/tools/SchemaToolCommandLine.java  |    308 -
 .../hive/metastore/tools/SchemaToolTask.java    |     32 -
 .../tools/SchemaToolTaskAlterCatalog.java       |     90 -
 .../tools/SchemaToolTaskCreateCatalog.java      |    132 -
 .../tools/SchemaToolTaskCreateUser.java         |    115 -
 .../metastore/tools/SchemaToolTaskInfo.java     |     43 -
 .../metastore/tools/SchemaToolTaskInit.java     |     73 -
 .../tools/SchemaToolTaskMoveDatabase.java       |     96 -
 .../tools/SchemaToolTaskMoveTable.java          |    142 -
 .../metastore/tools/SchemaToolTaskUpgrade.java  |    116 -
 .../metastore/tools/SchemaToolTaskValidate.java |    630 -
 .../hadoop/hive/metastore/tools/SmokeTest.java  |    102 -
 .../txn/AcidCompactionHistoryService.java       |     71 -
 .../metastore/txn/AcidHouseKeeperService.java   |     71 -
 .../txn/AcidOpenTxnsCounterService.java         |     72 -
 .../hive/metastore/txn/AcidWriteSetService.java |     69 -
 .../hive/metastore/txn/CompactionInfo.java      |    170 -
 .../metastore/txn/CompactionTxnHandler.java     |   1158 -
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |    599 -
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   4949 -
 .../hadoop/hive/metastore/txn/TxnStore.java     |    496 -
 .../hadoop/hive/metastore/txn/TxnUtils.java     |    481 -
 .../hive/metastore/utils/CommonCliOptions.java  |    160 -
 .../hadoop/hive/metastore/utils/FileUtils.java  |    537 -
 .../hadoop/hive/metastore/utils/HdfsUtils.java  |    395 -
 .../metastore/utils/HiveStrictManagedUtils.java |    100 -
 .../hadoop/hive/metastore/utils/JavaUtils.java  |    130 -
 .../hadoop/hive/metastore/utils/LogUtils.java   |    140 -
 .../hive/metastore/utils/MetaStoreUtils.java    |   1840 -
 .../metastore/utils/MetastoreVersionInfo.java   |    133 -
 .../hadoop/hive/metastore/utils/ObjectPair.java |     86 -
 .../hive/metastore/utils/SecurityUtils.java     |    313 -
 .../hive/metastore/utils/StringUtils.java       |    130 -
 .../hive/metastore/utils/StringableMap.java     |     80 -
 .../MetastoreDelegationTokenSupport.java        |     68 -
 .../hadoop/hive/metastore/metastore.proto       |     29 -
 .../main/resources/datanucleus-log4j.properties |     17 -
 .../main/resources/metastore-log4j2.properties  |     71 -
 .../src/main/resources/metastore-site.xml       |     34 -
 .../src/main/resources/package.jdo              |   1426 -
 .../src/main/resources/saveVersion.sh           |     91 -
 .../src/main/resources/thrift-replacements.txt  |    106 -
 standalone-metastore/src/main/scripts/base      |    231 -
 .../src/main/scripts/ext/metastore.sh           |     41 -
 .../src/main/scripts/ext/schemaTool.sh          |     33 -
 .../src/main/scripts/ext/smokeTest.sh           |     33 -
 .../src/main/scripts/metastore-config.sh        |     69 -
 .../src/main/scripts/schematool                 |     21 -
 .../src/main/scripts/start-metastore            |     22 -
 .../main/sql/derby/hive-schema-1.2.0.derby.sql  |    405 -
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |    692 -
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |    710 -
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |    710 -
 .../sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql  |     62 -
 .../sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql  |     22 -
 .../sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql  |     59 -
 .../sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql  |      5 -
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |    283 -
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |     49 -
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |      8 -
 .../src/main/sql/derby/upgrade.order.derby      |     18 -
 .../src/main/sql/mssql/create-user.mssql.sql    |      5 -
 .../main/sql/mssql/hive-schema-1.2.0.mssql.sql  |    947 -
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |   1246 -
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |   1271 -
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |   1272 -
 .../sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql  |     73 -
 .../sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |     39 -
 .../sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql  |     43 -
 .../sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql  |      7 -
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |    352 -
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |     51 -
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |     10 -
 .../src/main/sql/mssql/upgrade.order.mssql      |     12 -
 .../src/main/sql/mysql/create-user.mysql.sql    |      8 -
 .../main/sql/mysql/hive-schema-1.2.0.mysql.sql  |    910 -
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  |   1183 -
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |   1208 -
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |   1210 -
 .../sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql  |     75 -
 .../sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |     42 -
 .../sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql  |     43 -
 .../sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql  |      8 -
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |    326 -
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |     51 -
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |     10 -
 .../src/main/sql/mysql/upgrade.order.mysql      |     18 -
 .../src/main/sql/oracle/create-user.oracle.sql  |      3 -
 .../sql/oracle/hive-schema-1.2.0.oracle.sql     |    856 -
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     |   1140 -
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |   1165 -
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |   1167 -
 .../oracle/upgrade-1.2.0-to-2.0.0.oracle.sql    |     83 -
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql    |     39 -
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |     58 -
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |      7 -
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |    343 -
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |     51 -
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |      9 -
 .../src/main/sql/oracle/upgrade.order.oracle    |     14 -
 .../main/sql/postgres/create-user.postgres.sql  |      2 -
 .../sql/postgres/hive-schema-1.2.0.postgres.sql |   1562 -
 .../sql/postgres/hive-schema-3.0.0.postgres.sql |   1827 -
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |   1856 -
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |   1860 -
 .../upgrade-1.2.0-to-2.0.0.postgres.sql         |     73 -
 .../upgrade-2.0.0-to-2.1.0.postgres.sql         |     40 -
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |     39 -
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |      8 -
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |    360 -
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |     53 -
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |     10 -
 .../main/sql/postgres/upgrade.order.postgres    |     18 -
 .../src/main/thrift/hive_metastore.thrift       |   2318 -
 .../hadoop/hive/common/TestStatsSetupConst.java |    114 -
 .../ndv/fm/TestFMSketchSerialization.java       |    101 -
 .../hive/common/ndv/hll/TestHLLNoBias.java      |    117 -
 .../common/ndv/hll/TestHLLSerialization.java    |    270 -
 .../hive/common/ndv/hll/TestHyperLogLog.java    |    338 -
 .../common/ndv/hll/TestHyperLogLogDense.java    |     85 -
 .../common/ndv/hll/TestHyperLogLogMerge.java    |    147 -
 .../common/ndv/hll/TestHyperLogLogSparse.java   |     84 -
 .../common/ndv/hll/TestSparseEncodeHash.java    |     59 -
 .../metastore/AlternateFailurePreListener.java  |     62 -
 .../metastore/DummyEndFunctionListener.java     |     47 -
 .../metastore/DummyJdoConnectionUrlHook.java    |     45 -
 .../hadoop/hive/metastore/DummyListener.java    |    126 -
 .../metastore/DummyMetaStoreInitListener.java   |     39 -
 .../hadoop/hive/metastore/DummyPreListener.java |     49 -
 .../DummyRawStoreControlledCommit.java          |   1268 -
 .../DummyRawStoreForJdoConnection.java          |   1247 -
 .../apache/hadoop/hive/metastore/FakeDerby.java |    404 -
 .../HiveMetaStoreClientPreCatalog.java          |   3535 -
 .../InjectableBehaviourObjectStore.java         |    218 -
 .../hive/metastore/IpAddressListener.java       |    102 -
 .../hive/metastore/MetaStoreTestUtils.java      |    291 -
 .../MockPartitionExpressionForMetastore.java    |     58 -
 .../hive/metastore/NonCatCallsWithCatalog.java  |   1158 -
 .../hadoop/hive/metastore/TestAdminUser.java    |     49 -
 .../hive/metastore/TestAggregateStatsCache.java |    272 -
 .../metastore/TestCatalogNonDefaultClient.java  |     74 -
 .../metastore/TestCatalogNonDefaultSvr.java     |     68 -
 .../hive/metastore/TestCatalogOldClient.java    |     44 -
 .../hadoop/hive/metastore/TestDeadline.java     |    130 -
 .../metastore/TestEmbeddedHiveMetaStore.java    |     51 -
 .../hadoop/hive/metastore/TestFilterHooks.java  |    254 -
 .../hive/metastore/TestHiveAlterHandler.java    |    121 -
 .../hive/metastore/TestHiveMetaStore.java       |   3103 -
 .../metastore/TestHiveMetaStoreGetMetaConf.java |    115 -
 .../TestHiveMetaStorePartitionSpecs.java        |    383 -
 .../TestHiveMetaStoreSchemaMethods.java         |   1248 -
 .../metastore/TestHiveMetaStoreTimeout.java     |    142 -
 .../hive/metastore/TestHiveMetaStoreTxns.java   |    267 -
 ...TestHiveMetaStoreWithEnvironmentContext.java |    191 -
 .../hive/metastore/TestHiveMetastoreCli.java    |     68 -
 .../hive/metastore/TestLockRequestBuilder.java  |    587 -
 .../hive/metastore/TestMarkPartition.java       |    118 -
 .../hive/metastore/TestMarkPartitionRemote.java |     34 -
 .../TestMetaStoreConnectionUrlHook.java         |     49 -
 .../TestMetaStoreEndFunctionListener.java       |    146 -
 .../metastore/TestMetaStoreEventListener.java   |    471 -
 .../TestMetaStoreEventListenerOnlyOnCommit.java |    121 -
 .../TestMetaStoreEventListenerWithOldConf.java  |    129 -
 .../metastore/TestMetaStoreInitListener.java    |     56 -
 .../metastore/TestMetaStoreListenersError.java  |     97 -
 ...stMetaStoreMaterializationsCacheCleaner.java |    328 -
 .../metastore/TestMetaStoreSchemaFactory.java   |     72 -
 .../hive/metastore/TestMetaStoreSchemaInfo.java |     55 -
 .../hadoop/hive/metastore/TestObjectStore.java  |    904 -
 .../metastore/TestObjectStoreInitRetry.java     |    135 -
 .../metastore/TestObjectStoreSchemaMethods.java |    602 -
 .../hadoop/hive/metastore/TestOldSchema.java    |    233 -
 .../TestPartitionNameWhitelistValidation.java   |    125 -
 .../hive/metastore/TestRawStoreProxy.java       |     67 -
 .../hive/metastore/TestRemoteHiveMetaStore.java |     64 -
 .../TestRemoteHiveMetaStoreIpAddress.java       |     66 -
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |     31 -
 .../TestRetriesInRetryingHMSHandler.java        |    111 -
 .../hive/metastore/TestRetryingHMSHandler.java  |     82 -
 .../metastore/TestSetUGIOnBothClientServer.java |     34 -
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |     35 -
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |     35 -
 .../apache/hadoop/hive/metastore/TestStats.java |    732 -
 .../hive/metastore/VerifyingObjectStore.java    |    219 -
 .../annotation/MetastoreCheckinTest.java        |     25 -
 .../metastore/annotation/MetastoreTest.java     |     24 -
 .../metastore/annotation/MetastoreUnitTest.java |     25 -
 .../hive/metastore/cache/TestCachedStore.java   |   1075 -
 .../metastore/cache/TestCatalogCaching.java     |    142 -
 .../metastore/client/MetaStoreClientTest.java   |     95 -
 .../client/MetaStoreFactoryForTests.java        |    112 -
 .../metastore/client/TestAddPartitions.java     |   1736 -
 .../client/TestAddPartitionsFromPartSpec.java   |   1267 -
 .../metastore/client/TestAlterPartitions.java   |   1130 -
 .../metastore/client/TestAppendPartitions.java  |    594 -
 .../hive/metastore/client/TestCatalogs.java     |    267 -
 .../metastore/client/TestCheckConstraint.java   |    363 -
 .../hive/metastore/client/TestDatabases.java    |    634 -
 .../metastore/client/TestDefaultConstraint.java |    363 -
 .../metastore/client/TestDropPartitions.java    |    659 -
 .../client/TestExchangePartitions.java          |   1337 -
 .../hive/metastore/client/TestForeignKey.java   |    538 -
 .../hive/metastore/client/TestFunctions.java    |    765 -
 .../metastore/client/TestGetPartitions.java     |    608 -
 .../hive/metastore/client/TestGetTableMeta.java |    330 -
 .../metastore/client/TestListPartitions.java    |   1522 -
 .../metastore/client/TestNotNullConstraint.java |    355 -
 .../hive/metastore/client/TestPrimaryKey.java   |    468 -
 .../hive/metastore/client/TestRuntimeStats.java |    154 -
 .../TestTablesCreateDropAlterTruncate.java      |   1384 -
 .../metastore/client/TestTablesGetExists.java   |    514 -
 .../hive/metastore/client/TestTablesList.java   |    320 -
 .../metastore/client/TestUniqueConstraint.java  |    356 -
 .../hive/metastore/client/package-info.java     |     22 -
 .../merge/DecimalColumnStatsMergerTest.java     |    235 -
 .../hive/metastore/conf/TestMetastoreConf.java  |    433 -
 .../TestDataSourceProviderFactory.java          |    248 -
 .../hive/metastore/dbinstall/DbInstallBase.java |    265 -
 .../hive/metastore/dbinstall/ITestMysql.java    |     82 -
 .../hive/metastore/dbinstall/ITestOracle.java   |     83 -
 .../hive/metastore/dbinstall/ITestPostgres.java |     82 -
 .../metastore/dbinstall/ITestSqlServer.java     |     84 -
 .../json/TestJSONMessageDeserializer.java       |    115 -
 .../hive/metastore/metrics/TestMetrics.java     |    164 -
 .../minihms/AbstractMetaStoreService.java       |    173 -
 .../minihms/ClusterMetaStoreForTests.java       |     32 -
 .../minihms/EmbeddedMetaStoreForTests.java      |     33 -
 .../hadoop/hive/metastore/minihms/MiniHMS.java  |     76 -
 .../minihms/RemoteMetaStoreForTests.java        |     43 -
 .../hive/metastore/minihms/package-info.java    |     23 -
 .../tools/TestMetastoreSchemaTool.java          |     70 -
 .../tools/TestSchemaToolForMetastore.java       |    534 -
 .../metastore/txn/TestTxnHandlerNegative.java   |     58 -
 .../hadoop/hive/metastore/txn/TestTxnUtils.java |    239 -
 .../hive/metastore/utils/TestHdfsUtils.java     |    348 -
 .../metastore/utils/TestMetaStoreUtils.java     |    291 -
 .../src/test/resources/log4j2.properties        |     35 -
 upgrade-acid/pom.xml                            |      2 +-
 1844 files changed, 755932 insertions(+), 955166 deletions(-)
----------------------------------------------------------------------



[57/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
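
The diff below is the merge result for DummyRawStoreControlledCommit, a test
stub that wraps ObjectStore and delegates every RawStore call, except that
commitTransaction() can be forced to fail from a test; the lines marked '++'
add transactional overloads (extra txnId and writeIdList parameters) to
methods such as getTable(), getPartition(), alterTable(), alterPartitions()
and the column-statistics getters. For orientation, here is a minimal,
self-contained sketch of the controllable-commit delegation pattern
(illustrative only, not part of the commit; Store, RealStore and
ControlledCommitStore are hypothetical stand-ins for RawStore, ObjectStore
and the dummy class):

  // Illustrative sketch only, not part of the commit. Store, RealStore and
  // ControlledCommitStore are hypothetical stand-ins for RawStore,
  // ObjectStore and DummyRawStoreControlledCommit.
  public class ControlledCommitDemo {

    interface Store {
      boolean openTransaction();
      boolean commitTransaction();
    }

    // Stand-in for the real, persistent store: commits always succeed.
    static class RealStore implements Store {
      @Override public boolean openTransaction()   { return true; }
      @Override public boolean commitTransaction() { return true; }
    }

    // Wraps a RealStore and delegates everything, except that commits can
    // be forced to fail from a test.
    static class ControlledCommitStore implements Store {
      private final Store delegate = new RealStore();
      private static boolean commitSucceeds = true;

      static void setCommitSucceed(boolean flag) { commitSucceeds = flag; }

      @Override public boolean openTransaction() {
        return delegate.openTransaction();
      }

      @Override public boolean commitTransaction() {
        // Short-circuit to simulate a failed commit when requested.
        return commitSucceeds && delegate.commitTransaction();
      }
    }

    public static void main(String[] args) {
      Store store = new ControlledCommitStore();
      store.openTransaction();
      System.out.println(store.commitTransaction());   // true
      ControlledCommitStore.setCommitSucceed(false);
      store.openTransaction();
      System.out.println(store.commitTransaction());   // false
    }
  }

As the class javadoc in the diff notes, delegation is used rather than
inheritance because HiveMetaStore wraps the store in a Proxy, so the stub
must not inherit from any other class. Running main prints true, then false
once the forced failure is switched on.
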
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 0000000,2454479..106d9f2
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@@ -1,0 -1,1226 +1,1268 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.hive.common.TableName;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -
 -import java.nio.ByteBuffer;
 -import java.util.ArrayList;
 -import java.util.Collections;
 -import java.util.List;
 -import java.util.Map;
 -
 -import org.apache.hadoop.conf.Configurable;
 -import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
++import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
++import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.ISchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMTrigger;
+ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+ import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.Type;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+ import org.apache.hadoop.hive.metastore.api.WMMapping;
+ import org.apache.hadoop.hive.metastore.api.WMPool;
++import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
++import org.apache.hadoop.hive.metastore.api.ISchemaName;
++import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
++
++import java.nio.ByteBuffer;
++import java.util.ArrayList;
++import java.util.Collections;
++import java.util.List;
++import java.util.Map;
++
++import org.apache.hadoop.conf.Configurable;
++import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.thrift.TException;
+ 
+ /**
+  * A wrapper around {@link org.apache.hadoop.hive.metastore.ObjectStore}
+  * with the ability to control the result of commitTransaction().
+  * All other functions simply delegate to an embedded ObjectStore object.
+  * Ideally, we should have just extended ObjectStore instead of using
+  * delegation.  However, since HiveMetaStore uses a Proxy, this class must
+  * not inherit from any other class.
+  */
+ public class DummyRawStoreControlledCommit implements RawStore, Configurable {
+ 
+   private final ObjectStore objectStore;
+   public DummyRawStoreControlledCommit() {
+     objectStore = new ObjectStore();
+   }
+ 
+   /**
+    * If true, commitTransaction() delegates to the underlying ObjectStore.
+    * If false, commitTransaction() immediately returns false.
+    */
+   private static boolean shouldCommitSucceed = true;
+   public static void setCommitSucceed(boolean flag) {
+     shouldCommitSucceed = flag;
+   }
+ 
+   @Override
+   public boolean commitTransaction() {
+     if (shouldCommitSucceed) {
+       return objectStore.commitTransaction();
+     } else {
+       return false;
+     }
+   }
+ 
+   @Override
+   public boolean isActiveTransaction() {
+     return false;
+   }
+ 
+   // All remaining functions simply delegate to objectStore
+ 
+   @Override
+   public Configuration getConf() {
+     return objectStore.getConf();
+   }
+ 
+   @Override
+   public void setConf(Configuration conf) {
+     objectStore.setConf(conf);
+   }
+ 
+   @Override
+   public void shutdown() {
+     objectStore.shutdown();
+   }
+ 
+   @Override
+   public boolean openTransaction() {
+     return objectStore.openTransaction();
+   }
+ 
+   @Override
+   public void rollbackTransaction() {
+     objectStore.rollbackTransaction();
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+     objectStore.createCatalog(cat);
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat) throws MetaException,
+       InvalidOperationException {
+     objectStore.alterCatalog(catName, cat);
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     return objectStore.getCatalog(catalogName);
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     return objectStore.getCatalogs();
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     objectStore.dropCatalog(catalogName);
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+     objectStore.createDatabase(db);
+   }
+ 
+   @Override
+   public Database getDatabase(String catName, String dbName) throws NoSuchObjectException {
+     return objectStore.getDatabase(catName, dbName);
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbName)
+       throws NoSuchObjectException, MetaException {
+     return objectStore.dropDatabase(catName, dbName);
+   }
+ 
+   @Override
+   public boolean alterDatabase(String catName, String dbName, Database db)
+       throws NoSuchObjectException, MetaException {
+ 
+     return objectStore.alterDatabase(catName, dbName, db);
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+     return objectStore.getDatabases(catName, pattern);
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+     return objectStore.getAllDatabases(catName);
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     return objectStore.createType(type);
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     return objectStore.getType(typeName);
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     return objectStore.dropType(typeName);
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     objectStore.createTable(tbl);
+   }
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException,
+       InvalidObjectException, InvalidInputException {
+     return objectStore.dropTable(catName, dbName, tableName);
+   }
+ 
+   @Override
+   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+     return objectStore.getTable(catName, dbName, tableName);
+   }
+ 
+   @Override
++  public Table getTable(String catName, String dbName, String tableName, long txnId, String writeIdList)
++      throws MetaException {
++    return objectStore.getTable(catName, dbName, tableName, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean addPartition(Partition part)
+       throws InvalidObjectException, MetaException {
+     return objectStore.addPartition(part);
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tableName, List<String> partVals)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getPartition(catName, dbName, tableName, partVals);
+   }
+ 
+   @Override
++  public Partition getPartition(String catName, String dbName, String tableName,
++                                List<String> partVals, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return objectStore.getPartition(catName, dbName, tableName, partVals, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean dropPartition(String catName, String dbName, String tableName, List<String> partVals)
+       throws MetaException, NoSuchObjectException,
+       InvalidObjectException, InvalidInputException {
+     return objectStore.dropPartition(catName, dbName, tableName, partVals);
+   }
+ 
+   @Override
+   public List<Partition> getPartitions(String catName, String dbName, String tableName, int max)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitions(catName, dbName, tableName, max);
+   }
+ 
+   @Override
+   public Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max) {
+     return objectStore.getPartitionLocations(catName, dbName, tblName, baseLocationToNotShow, max);
+   }
+ 
+   @Override
 -  public void alterTable(String catName, String dbName, String name, Table newTable)
++  public void alterTable(String catName, String dbName, String name, Table newTable,
++      long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException {
 -    objectStore.alterTable(catName, dbName, name, newTable);
++    objectStore.alterTable(catName, dbName, name, newTable, queryTxnId, queryValidWriteIds);
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException {
+     objectStore.updateCreationMetadata(catName, dbname, tablename, cm);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern) throws MetaException {
+     return objectStore.getTables(catName, dbName, pattern);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType) throws MetaException {
+     return objectStore.getTables(catName, dbName, pattern, tableType);
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getMaterializedViewsForRewriting(catName, dbName);
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames, List<String> tableTypes)
+       throws MetaException {
+     return objectStore.getTableMeta(catName, dbNames, tableNames, tableTypes);
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames)
+       throws MetaException, UnknownDBException {
+     return objectStore.getTableObjectsByName(catName, dbName, tableNames);
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+     return objectStore.getAllTables(catName, dbName);
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+       short maxTables) throws MetaException, UnknownDBException {
+     return objectStore.listTableNamesByFilter(catName, dbName, filter, maxTables);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tblName, short maxParts)
+       throws MetaException {
+     return objectStore.listPartitionNames(catName, dbName, tblName, maxParts);
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(String catName, String db_name,
+       String tbl_name, List<FieldSchema> cols, boolean applyDistinct, String filter,
+       boolean ascending, List<FieldSchema> order, long maxParts) throws MetaException {
+     return null;
+   }
+ 
+   @Override
+   public void alterPartition(String catName, String dbName, String tblName, List<String> partVals,
 -      Partition newPart) throws InvalidObjectException, MetaException {
 -    objectStore.alterPartition(catName, dbName, tblName, partVals, newPart);
++      Partition newPart, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
++    objectStore.alterPartition(catName, dbName, tblName, partVals, newPart, queryTxnId, queryValidWriteIds);
+   }
+ 
+   @Override
+   public void alterPartitions(String catName, String dbName, String tblName,
 -      List<List<String>> partValsList, List<Partition> newParts)
 -      throws InvalidObjectException, MetaException {
 -    objectStore.alterPartitions(catName, dbName, tblName, partValsList, newParts);
++      List<List<String>> partValsList, List<Partition> newParts,
++      long writeId, long queryTxnId, String queryValidWriteIds) throws InvalidObjectException, MetaException {
++    objectStore.alterPartitions(
++        catName, dbName, tblName, partValsList, newParts, writeId, queryTxnId, queryValidWriteIds);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByFilter(String catName, String dbName, String tblName,
+       String filter, short maxParts) throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitionsByFilter(catName, dbName, tblName, filter, maxParts);
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tblName,
+                                       String filter) throws MetaException, NoSuchObjectException {
+     return objectStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
+   }
+ 
+   @Override
+   public int getNumPartitionsByExpr(String catName, String dbName, String tblName,
+                                       byte[] expr) throws MetaException, NoSuchObjectException {
+     return objectStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+       List<String> partNames) throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitionsByNames(catName, dbName, tblName, partNames);
+   }
+ 
+   @Override
+   public boolean getPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr,
+       String defaultPartitionName, short maxParts, List<Partition> result) throws TException {
+     return objectStore.getPartitionsByExpr(catName,
+         dbName, tblName, expr, defaultPartitionName, maxParts, result);
+   }
+ 
+   @Override
+   public Table markPartitionForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partVals, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return objectStore.markPartitionForEvent(catName, dbName, tblName, partVals, evtType);
+   }
+ 
+   @Override
+   public boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName,
+       Map<String, String> partName, PartitionEventType evtType)
+       throws MetaException, UnknownTableException, InvalidPartitionException,
+       UnknownPartitionException {
+     return objectStore.isPartitionMarkedForEvent(catName, dbName, tblName, partName, evtType);
+   }
+ 
+   @Override
+   public boolean addRole(String rowName, String ownerName) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+     return objectStore.addRole(rowName, ownerName);
+   }
+ 
+   @Override
+   public boolean removeRole(String roleName)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.removeRole(roleName);
+   }
+ 
+   @Override
+   public boolean grantRole(Role role, String userName, PrincipalType principalType,
+       String grantor, PrincipalType grantorType, boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return objectStore.grantRole(role, userName, principalType, grantor, grantorType,
+         grantOption);
+   }
+ 
+   @Override
+   public boolean revokeRole(Role role, String userName, PrincipalType principalType, boolean grantOption)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.revokeRole(role, userName, principalType, grantOption);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return objectStore.getUserPrivilegeSet(userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getDBPrivilegeSet(String catName, String dbName, String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException {
+     return objectStore.getDBPrivilegeSet(catName, dbName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getTablePrivilegeSet(String catName, String dbName, String tableName,
+       String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return objectStore.getTablePrivilegeSet(catName, dbName, tableName, userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getPartitionPrivilegeSet(String catName, String dbName, String tableName,
+       String partition, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return objectStore.getPartitionPrivilegeSet(catName, dbName, tableName, partition,
+         userName, groupNames);
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet getColumnPrivilegeSet(String catName, String dbName, String tableName,
+       String partitionName, String columnName, String userName, List<String> groupNames)
+       throws InvalidObjectException, MetaException {
+     return objectStore.getColumnPrivilegeSet(catName, dbName, tableName, partitionName,
+         columnName, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+       PrincipalType principalType) {
+     return objectStore.listPrincipalGlobalGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName) {
+     return objectStore.listPrincipalDBGrants(principalName, principalType, catName, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listAllTableGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName) {
+     return objectStore.listAllTableGrants(principalName, principalType,
+         catName, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, List<String> partValues,
+       String partName) {
+     return objectStore.listPrincipalPartitionGrants(principalName, principalType,
+         catName, dbName, tableName, partValues, partName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName, String tableName, String columnName) {
+     return objectStore.listPrincipalTableColumnGrants(principalName, principalType,
+         catName, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName, String tableName,
+       List<String> partVals, String partName, String columnName) {
+     return objectStore.listPrincipalPartitionColumnGrants(principalName, principalType,
+         catName, dbName, tableName, partVals, partName, columnName);
+   }
+ 
+   @Override
+   public boolean grantPrivileges(PrivilegeBag privileges) throws InvalidObjectException,
+       MetaException, NoSuchObjectException {
+     return objectStore.grantPrivileges(privileges);
+   }
+ 
+   @Override
+   public boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+       throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return objectStore.revokePrivileges(privileges, grantOption);
+   }
+ 
+   @Override
+   public boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+           throws InvalidObjectException, MetaException, NoSuchObjectException {
+     return objectStore.refreshPrivileges(objToRefresh, authorizer, grantPrivileges);
+   }
+   @Override
+   public Role getRole(String roleName) throws NoSuchObjectException {
+     return objectStore.getRole(roleName);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() {
+     return objectStore.listRoleNames();
+   }
+ 
+   @Override
+   public List<Role> listRoles(String principalName, PrincipalType principalType) {
+     return objectStore.listRoles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+                                                       PrincipalType principalType) {
+     return objectStore.listRolesWithGrants(principalName, principalType);
+   }
+ 
+   @Override
+   public List<RolePrincipalGrant> listRoleMembers(String roleName) {
+     return objectStore.listRoleMembers(roleName);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return objectStore.getPartitionWithAuth(catName, dbName, tblName, partVals, userName,
+         groupNames);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsWithAuth(String catName, String dbName, String tblName,
+       short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException {
+     return objectStore.getPartitionsWithAuth(catName, dbName, tblName, maxParts, userName,
+         groupNames);
+   }
+ 
+   @Override
+   public List<String> listPartitionNamesPs(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.listPartitionNamesPs(catName, dbName, tblName, partVals, maxParts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsPsWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException {
+     return objectStore.listPartitionsPsWithAuth(catName, dbName, tblName, partVals, maxParts,
+         userName, groupNames);
+   }
+ 
+   @Override
+   public long cleanupEvents() {
+     return objectStore.cleanupEvents();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalDBGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalTableGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalPartitionGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalTableColumnGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType) {
+     return objectStore.listPrincipalPartitionColumnGrantsAll(principalName, principalType);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listGlobalGrantsAll() {
+     return objectStore.listGlobalGrantsAll();
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName) {
+     return objectStore.listDBGrantsAll(catName, dbName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionColumnGrantsAll(String catName, String dbName, String tableName,
+       String partitionName, String columnName) {
+     return objectStore.listPartitionColumnGrantsAll(catName, dbName, tableName, partitionName, columnName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName) {
+     return objectStore.listTableGrantsAll(catName, dbName, tableName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listPartitionGrantsAll(String catName, String dbName, String tableName,
+       String partitionName) {
+     return objectStore.listPartitionGrantsAll(catName, dbName, tableName, partitionName);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> listTableColumnGrantsAll(String catName, String dbName, String tableName,
+       String columnName) {
+     return objectStore.listTableColumnGrantsAll(catName, dbName, tableName, columnName);
+   }
+ 
+   @Override
+   public ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+       List<String> colNames) throws MetaException, NoSuchObjectException {
+     return objectStore.getTableColumnStatistics(catName, dbName, tableName, colNames);
+   }
+ 
+   @Override
++  public ColumnStatistics getTableColumnStatistics(String catName, String dbName,
++                                                   String tableName, List<String> colNames,
++                                                   long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return objectStore.getTableColumnStatistics(
++        catName, dbName, tableName, colNames, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
+                                              String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException {
+     return objectStore.deleteTableColumnStatistics(catName, dbName, tableName, colName);
+   }
+ 
+   @Override
+   public boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
+       String partName, List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
+     return objectStore.deletePartitionColumnStatistics(catName, dbName, tableName, partName,
+         partVals, colName);
+   }
+ 
+   @Override
+   public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
+     return objectStore.updateTableColumnStatistics(statsObj);
+   }
+ 
+   @Override
+   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
+       List<String> partVals)
+       throws NoSuchObjectException, MetaException, InvalidObjectException,
+       InvalidInputException {
+     return objectStore.updatePartitionColumnStatistics(statsObj, partVals);
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) {
+     return false;
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) {
+     return false;
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) {
+     return "";
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() {
+     return new ArrayList<>();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) throws MetaException {
+     return -1;
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key)
+     throws NoSuchObjectException, MetaException {}
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) {
+     return false;
+   }
+ 
+   @Override
+   public String[] getMasterKeys() {
+     return new String[0];
+   }
+ 
+   @Override
+   public void verifySchema() throws MetaException {
+   }
+ 
+   @Override
+   public String getMetaStoreSchemaVersion() throws MetaException {
+     return objectStore.getMetaStoreSchemaVersion();
+   }
+ 
+   @Override
+   public void setMetaStoreSchemaVersion(String schemaVersion, String comment) throws MetaException {
+     objectStore.setMetaStoreSchemaVersion(schemaVersion, comment);
+   }
+ 
+   @Override
+   public List<ColumnStatistics> getPartitionColumnStatistics(String catName, String dbName,
+       String tblName, List<String> colNames, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.getPartitionColumnStatistics(catName, dbName, tblName, colNames, partNames);
+   }
+ 
+   @Override
++  public List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName, List<String> partNames,
++      List<String> colNames, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return objectStore.getPartitionColumnStatistics(
++        catName, dbName, tblName, partNames, colNames, txnId, writeIdList);
++  }
++
++  @Override
+   public boolean doesPartitionExist(String catName, String dbName, String tableName,
+       List<FieldSchema> partKeys, List<String> partVals)
+       throws MetaException, NoSuchObjectException {
+     return objectStore.doesPartitionExist(catName, dbName, tableName, partKeys, partVals);
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException {
+     return objectStore.addPartitions(catName, dbName, tblName, parts);
+   }
+ 
+   @Override
+   public boolean addPartitions(String catName, String dbName, String tblName, PartitionSpecProxy partitionSpec, boolean ifNotExists) throws InvalidObjectException, MetaException {
+     return false;
+   }
+ 
+   @Override
+   public void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
+       throws MetaException, NoSuchObjectException {
+     objectStore.dropPartitions(catName, dbName, tblName, partNames);
+   }
+ 
+   @Override
+   public void createFunction(Function func) throws InvalidObjectException,
+       MetaException {
+     objectStore.createFunction(func);
+   }
+ 
+   @Override
+   public void alterFunction(String catName, String dbName, String funcName, Function newFunction)
+       throws InvalidObjectException, MetaException {
+     objectStore.alterFunction(catName, dbName, funcName, newFunction);
+   }
+ 
+   @Override
+   public void dropFunction(String catName, String dbName, String funcName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException {
+     objectStore.dropFunction(catName, dbName, funcName);
+   }
+ 
+   @Override
+   public Function getFunction(String catName, String dbName, String funcName)
+       throws MetaException {
+     return objectStore.getFunction(catName, dbName, funcName);
+   }
+ 
+   @Override
+   public List<Function> getAllFunctions(String catName)
+       throws MetaException {
+     return Collections.emptyList();
+   }
+ 
+   @Override
+   public List<String> getFunctions(String catName, String dbName, String pattern)
+       throws MetaException {
+     return objectStore.getFunctions(catName, dbName, pattern);
+   }
+ 
+   @Override
+   public AggrStats get_aggr_stats_for(String catName, String dbName,
+       String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException {
+     return null;
+   }
+ 
+   @Override
++  public AggrStats get_aggr_stats_for(String catName, String dbName,
++                                      String tblName, List<String> partNames,
++                                      List<String> colNames,
++                                      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException {
++    return null;
++  }
++
++  @Override
+   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+     return objectStore.getNextNotification(rqst);
+   }
+ 
+   @Override
+   public void addNotificationEvent(NotificationEvent event) throws MetaException {
+     objectStore.addNotificationEvent(event);
+   }
+ 
+   @Override
+   public void cleanNotificationEvents(int olderThan) {
+     objectStore.cleanNotificationEvents(olderThan);
+   }
+ 
+   @Override
+   public CurrentNotificationEventId getCurrentNotificationEventId() {
+     return objectStore.getCurrentNotificationEventId();
+   }
+ 
+   @Override
+   public NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst) {
+     return objectStore.getNotificationEventsCount(rqst);
+   }
+ 
+   @Override
+   public void flushCache() {
+     objectStore.flushCache();
+   }
+ 
+   @Override
+   public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
+     return null;
+   }
+ 
+   @Override
+   public void putFileMetadata(
+       List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType type) {
+   }
+ 
+   @Override
+   public boolean isFileMetadataSupported() {
+     return false;
+   }
+ 
+   @Override
+   public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+       ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] eliminated) {
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return objectStore.getTableCount();
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return objectStore.getPartitionCount();
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return objectStore.getDatabaseCount();
+   }
+ 
+   @Override
+   public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+     return null;
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
+     String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name, String tbl_name)
+     throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name, String tbl_name)
+       throws MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void dropConstraint(String catName, String dbName, String tableName,
+       String constraintName, boolean missingOk) throws NoSuchObjectException {
+     // TODO Auto-generated method stub
+   }
+ 
+   @Override
+   public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks)
+     throws InvalidObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<String> addForeignKeys(List<SQLForeignKey> fks)
+     throws InvalidObjectException, MetaException {
+     return null;
+   }
+ 
+   @Override
+   public List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns)
+     throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addDefaultConstraints(List<SQLDefaultConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public List<String> addCheckConstraints(List<SQLCheckConstraint> nns)
+       throws InvalidObjectException, MetaException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public String getMetastoreDbUuid() throws MetaException {
+     throw new MetaException("Get metastore uuid is not implemented");
+   }
+ 
+   @Override
+   public void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException {
+     objectStore.createResourcePlan(resourcePlan, copyFrom, defaultPoolSize);
+   }
+ 
+   @Override
+   public WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException,
+       MetaException {
+     return objectStore.getResourcePlan(name);
+   }
+ 
+   @Override
+   public List<WMResourcePlan> getAllResourcePlans() throws MetaException {
+     return objectStore.getAllResourcePlans();
+   }
+ 
+   @Override
+   public WMFullResourcePlan alterResourcePlan(String name, WMNullableResourcePlan resourcePlan,
+       boolean canActivateDisabled, boolean canDeactivate, boolean isReplace)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException {
+     return objectStore.alterResourcePlan(
+       name, resourcePlan, canActivateDisabled, canDeactivate, isReplace);
+   }
+ 
+   @Override
+   public WMFullResourcePlan getActiveResourcePlan() throws MetaException {
+     return objectStore.getActiveResourcePlan();
+   }
+ 
+   @Override
+   public WMValidateResourcePlanResponse validateResourcePlan(String name)
+       throws NoSuchObjectException, InvalidObjectException, MetaException {
+     return objectStore.validateResourcePlan(name);
+   }
+ 
+   @Override
+   public void dropResourcePlan(String name) throws NoSuchObjectException, MetaException {
+     objectStore.dropResourcePlan(name);
+   }
+ 
+   @Override
+   public void createWMTrigger(WMTrigger trigger)
+       throws AlreadyExistsException, MetaException, NoSuchObjectException,
+           InvalidOperationException {
+     objectStore.createWMTrigger(trigger);
+   }
+ 
+   @Override
+   public void alterWMTrigger(WMTrigger trigger)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.alterWMTrigger(trigger);
+   }
+ 
+   @Override
+   public void dropWMTrigger(String resourcePlanName, String triggerName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMTrigger(resourcePlanName, triggerName);
+   }
+ 
+   @Override
+   public List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+       throws NoSuchObjectException, MetaException {
+     return objectStore.getTriggersForResourcePlan(resourcePlanName);
+   }
+ 
+   @Override
+   public void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+     objectStore.createPool(pool);
+   }
+ 
+   @Override
+   public void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException,
+       NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.alterPool(pool, poolPath);
+   }
+ 
+   @Override
+   public void dropWMPool(String resourcePlanName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMPool(resourcePlanName, poolPath);
+   }
+ 
+   @Override
+   public void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+       MetaException {
+     objectStore.createOrUpdateWMMapping(mapping, update);
+   }
+ 
+   @Override
+   public void dropWMMapping(WMMapping mapping)
+       throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMMapping(mapping);
+   }
+ 
+   @Override
+   public void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException {
+     objectStore.createWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+   }
+ 
+   @Override
+   public void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName,
+       String poolPath) throws NoSuchObjectException, InvalidOperationException, MetaException {
+     objectStore.dropWMTriggerToPoolMapping(resourcePlanName, triggerName, poolPath);
+   }
+ 
+   @Override
+   public List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     // TODO Auto-generated method stub
+     return null;
+   }
+ 
+   @Override
+   public void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+       NoSuchObjectException {
+     objectStore.createISchema(schema);
+   }
+ 
+   @Override
+   public void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException,
+       MetaException {
+     objectStore.alterISchema(schemaName, newSchema);
+   }
+ 
+   @Override
+   public ISchema getISchema(ISchemaName schemaName) throws MetaException {
+     return objectStore.getISchema(schemaName);
+   }
+ 
+   @Override
+   public void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException {
+     objectStore.dropISchema(schemaName);
+   }
+ 
+   @Override
+   public void addSchemaVersion(SchemaVersion schemaVersion) throws
+       AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException {
+     objectStore.addSchemaVersion(schemaVersion);
+   }
+ 
+   @Override
+   public void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion) throws
+       NoSuchObjectException, MetaException {
+     objectStore.alterSchemaVersion(version, newVersion);
+   }
+ 
+   @Override
+   public SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException {
+     return objectStore.getSchemaVersion(version);
+   }
+ 
+   @Override
+   public SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return objectStore.getLatestSchemaVersion(schemaName);
+   }
+ 
+   @Override
+   public List<SchemaVersion> getAllSchemaVersion(ISchemaName schemaName) throws MetaException {
+     return objectStore.getAllSchemaVersion(schemaName);
+   }
+ 
+   @Override
+   public List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace,
+                                                         String type) throws MetaException {
+     return objectStore.getSchemaVersionsByColumns(colName, colNamespace, type);
+   }
+ 
+   @Override
+   public void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException,
+       MetaException {
+     objectStore.dropSchemaVersion(version);
+   }
+ 
+   @Override
+   public SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException {
+     return objectStore.getSerDeInfo(serDeName);
+   }
+ 
+   @Override
+   public void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException {
+     objectStore.addSerde(serde);
+   }
+ 
+   @Override
+   public void addRuntimeStat(RuntimeStat stat) throws MetaException {
+     objectStore.addRuntimeStat(stat);
+   }
+ 
+   @Override
+   public List<RuntimeStat> getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException {
+     return objectStore.getRuntimeStats(maxEntries, maxCreateTime);
+   }
+ 
+   @Override
+   public int deleteRuntimeStats(int maxRetainSecs) throws MetaException {
+     return objectStore.deleteRuntimeStats(maxRetainSecs);
+   }
+ 
+   @Override
+   public void cleanWriteNotificationEvents(int olderThan) {
+     objectStore.cleanWriteNotificationEvents(olderThan);
+   }
+ 
+   @Override
+   public List<WriteEventInfo> getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException {
+     return objectStore.getAllWriteEventInfo(txnId, dbName, tableName);
+   }
+ 
+   @Override
+   public List<TableName> getTableNamesWithStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public List<TableName> getAllTableNamesForStats() throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ 
+   @Override
+   public Map<String, List<String>> getPartitionColsWithStats(String catName,
+       String dbName, String tableName) throws MetaException,
+       NoSuchObjectException {
+     return null;
+   }
+ }


[77/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
index 0000000,031e72b..df3c586
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ttypes.py
@@@ -1,0 -1,23076 +1,23608 @@@
+ #
+ # Autogenerated by Thrift Compiler (0.9.3)
+ #
+ # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ #
+ #  options string: py
+ #
+ 
+ from thrift.Thrift import TType, TMessageType, TException, TApplicationException
+ import fb303.ttypes
+ 
+ 
+ from thrift.transport import TTransport
+ from thrift.protocol import TBinaryProtocol, TProtocol
+ try:
+   from thrift.protocol import fastbinary
+ except:
+   fastbinary = None
+ 
+ 
+ class HiveObjectType:
+   GLOBAL = 1
+   DATABASE = 2
+   TABLE = 3
+   PARTITION = 4
+   COLUMN = 5
+ 
+   _VALUES_TO_NAMES = {
+     1: "GLOBAL",
+     2: "DATABASE",
+     3: "TABLE",
+     4: "PARTITION",
+     5: "COLUMN",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "GLOBAL": 1,
+     "DATABASE": 2,
+     "TABLE": 3,
+     "PARTITION": 4,
+     "COLUMN": 5,
+   }
+ 
+ class PrincipalType:
+   USER = 1
+   ROLE = 2
+   GROUP = 3
+ 
+   _VALUES_TO_NAMES = {
+     1: "USER",
+     2: "ROLE",
+     3: "GROUP",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "USER": 1,
+     "ROLE": 2,
+     "GROUP": 3,
+   }
+ 
+ class PartitionEventType:
+   LOAD_DONE = 1
+ 
+   _VALUES_TO_NAMES = {
+     1: "LOAD_DONE",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "LOAD_DONE": 1,
+   }
+ 
+ class TxnState:
+   COMMITTED = 1
+   ABORTED = 2
+   OPEN = 3
+ 
+   _VALUES_TO_NAMES = {
+     1: "COMMITTED",
+     2: "ABORTED",
+     3: "OPEN",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "COMMITTED": 1,
+     "ABORTED": 2,
+     "OPEN": 3,
+   }
+ 
+ class LockLevel:
+   DB = 1
+   TABLE = 2
+   PARTITION = 3
+ 
+   _VALUES_TO_NAMES = {
+     1: "DB",
+     2: "TABLE",
+     3: "PARTITION",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "DB": 1,
+     "TABLE": 2,
+     "PARTITION": 3,
+   }
+ 
+ class LockState:
+   ACQUIRED = 1
+   WAITING = 2
+   ABORT = 3
+   NOT_ACQUIRED = 4
+ 
+   _VALUES_TO_NAMES = {
+     1: "ACQUIRED",
+     2: "WAITING",
+     3: "ABORT",
+     4: "NOT_ACQUIRED",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "ACQUIRED": 1,
+     "WAITING": 2,
+     "ABORT": 3,
+     "NOT_ACQUIRED": 4,
+   }
+ 
+ class LockType:
+   SHARED_READ = 1
+   SHARED_WRITE = 2
+   EXCLUSIVE = 3
+ 
+   _VALUES_TO_NAMES = {
+     1: "SHARED_READ",
+     2: "SHARED_WRITE",
+     3: "EXCLUSIVE",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "SHARED_READ": 1,
+     "SHARED_WRITE": 2,
+     "EXCLUSIVE": 3,
+   }
+ 
+ class CompactionType:
+   MINOR = 1
+   MAJOR = 2
+ 
+   _VALUES_TO_NAMES = {
+     1: "MINOR",
+     2: "MAJOR",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "MINOR": 1,
+     "MAJOR": 2,
+   }
+ 
+ class GrantRevokeType:
+   GRANT = 1
+   REVOKE = 2
+ 
+   _VALUES_TO_NAMES = {
+     1: "GRANT",
+     2: "REVOKE",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "GRANT": 1,
+     "REVOKE": 2,
+   }
+ 
+ class DataOperationType:
+   SELECT = 1
+   INSERT = 2
+   UPDATE = 3
+   DELETE = 4
+   UNSET = 5
+   NO_TXN = 6
+ 
+   _VALUES_TO_NAMES = {
+     1: "SELECT",
+     2: "INSERT",
+     3: "UPDATE",
+     4: "DELETE",
+     5: "UNSET",
+     6: "NO_TXN",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "SELECT": 1,
+     "INSERT": 2,
+     "UPDATE": 3,
+     "DELETE": 4,
+     "UNSET": 5,
+     "NO_TXN": 6,
+   }
+ 
+ class EventRequestType:
+   INSERT = 1
+   UPDATE = 2
+   DELETE = 3
+ 
+   _VALUES_TO_NAMES = {
+     1: "INSERT",
+     2: "UPDATE",
+     3: "DELETE",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "INSERT": 1,
+     "UPDATE": 2,
+     "DELETE": 3,
+   }
+ 
+ class SerdeType:
+   HIVE = 1
+   SCHEMA_REGISTRY = 2
+ 
+   _VALUES_TO_NAMES = {
+     1: "HIVE",
+     2: "SCHEMA_REGISTRY",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "HIVE": 1,
+     "SCHEMA_REGISTRY": 2,
+   }
+ 
+ class SchemaType:
+   HIVE = 1
+   AVRO = 2
+ 
+   _VALUES_TO_NAMES = {
+     1: "HIVE",
+     2: "AVRO",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "HIVE": 1,
+     "AVRO": 2,
+   }
+ 
+ class SchemaCompatibility:
+   NONE = 1
+   BACKWARD = 2
+   FORWARD = 3
+   BOTH = 4
+ 
+   _VALUES_TO_NAMES = {
+     1: "NONE",
+     2: "BACKWARD",
+     3: "FORWARD",
+     4: "BOTH",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "NONE": 1,
+     "BACKWARD": 2,
+     "FORWARD": 3,
+     "BOTH": 4,
+   }
+ 
+ class SchemaValidation:
+   LATEST = 1
+   ALL = 2
+ 
+   _VALUES_TO_NAMES = {
+     1: "LATEST",
+     2: "ALL",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "LATEST": 1,
+     "ALL": 2,
+   }
+ 
+ class SchemaVersionState:
+   INITIATED = 1
+   START_REVIEW = 2
+   CHANGES_REQUIRED = 3
+   REVIEWED = 4
+   ENABLED = 5
+   DISABLED = 6
+   ARCHIVED = 7
+   DELETED = 8
+ 
+   _VALUES_TO_NAMES = {
+     1: "INITIATED",
+     2: "START_REVIEW",
+     3: "CHANGES_REQUIRED",
+     4: "REVIEWED",
+     5: "ENABLED",
+     6: "DISABLED",
+     7: "ARCHIVED",
+     8: "DELETED",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "INITIATED": 1,
+     "START_REVIEW": 2,
+     "CHANGES_REQUIRED": 3,
+     "REVIEWED": 4,
+     "ENABLED": 5,
+     "DISABLED": 6,
+     "ARCHIVED": 7,
+     "DELETED": 8,
+   }
+ 
+ class FunctionType:
+   JAVA = 1
+ 
+   _VALUES_TO_NAMES = {
+     1: "JAVA",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "JAVA": 1,
+   }
+ 
+ class ResourceType:
+   JAR = 1
+   FILE = 2
+   ARCHIVE = 3
+ 
+   _VALUES_TO_NAMES = {
+     1: "JAR",
+     2: "FILE",
+     3: "ARCHIVE",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "JAR": 1,
+     "FILE": 2,
+     "ARCHIVE": 3,
+   }
+ 
+ class FileMetadataExprType:
+   ORC_SARG = 1
+ 
+   _VALUES_TO_NAMES = {
+     1: "ORC_SARG",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "ORC_SARG": 1,
+   }
+ 
+ class ClientCapability:
+   TEST_CAPABILITY = 1
+   INSERT_ONLY_TABLES = 2
+ 
+   _VALUES_TO_NAMES = {
+     1: "TEST_CAPABILITY",
+     2: "INSERT_ONLY_TABLES",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "TEST_CAPABILITY": 1,
+     "INSERT_ONLY_TABLES": 2,
+   }
+ 
+ class WMResourcePlanStatus:
+   ACTIVE = 1
+   ENABLED = 2
+   DISABLED = 3
+ 
+   _VALUES_TO_NAMES = {
+     1: "ACTIVE",
+     2: "ENABLED",
+     3: "DISABLED",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "ACTIVE": 1,
+     "ENABLED": 2,
+     "DISABLED": 3,
+   }
+ 
+ class WMPoolSchedulingPolicy:
+   FAIR = 1
+   FIFO = 2
+ 
+   _VALUES_TO_NAMES = {
+     1: "FAIR",
+     2: "FIFO",
+   }
+ 
+   _NAMES_TO_VALUES = {
+     "FAIR": 1,
+     "FIFO": 2,
+   }
+ 
+ 
+ class Version:
+   """
+   Attributes:
+    - version
+    - comments
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'version', None, None, ), # 1
+     (2, TType.STRING, 'comments', None, None, ), # 2
+   )
+ 
+   def __init__(self, version=None, comments=None,):
+     self.version = version
+     self.comments = comments
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.version = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.comments = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('Version')
+     if self.version is not None:
+       oprot.writeFieldBegin('version', TType.STRING, 1)
+       oprot.writeString(self.version)
+       oprot.writeFieldEnd()
+     if self.comments is not None:
+       oprot.writeFieldBegin('comments', TType.STRING, 2)
+       oprot.writeString(self.comments)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.version)
+     value = (value * 31) ^ hash(self.comments)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class FieldSchema:
+   """
+   Attributes:
+    - name
+    - type
+    - comment
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'name', None, None, ), # 1
+     (2, TType.STRING, 'type', None, None, ), # 2
+     (3, TType.STRING, 'comment', None, None, ), # 3
+   )
+ 
+   def __init__(self, name=None, type=None, comment=None,):
+     self.name = name
+     self.type = type
+     self.comment = comment
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.type = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.comment = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('FieldSchema')
+     if self.name is not None:
+       oprot.writeFieldBegin('name', TType.STRING, 1)
+       oprot.writeString(self.name)
+       oprot.writeFieldEnd()
+     if self.type is not None:
+       oprot.writeFieldBegin('type', TType.STRING, 2)
+       oprot.writeString(self.type)
+       oprot.writeFieldEnd()
+     if self.comment is not None:
+       oprot.writeFieldBegin('comment', TType.STRING, 3)
+       oprot.writeString(self.comment)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.name)
+     value = (value * 31) ^ hash(self.type)
+     value = (value * 31) ^ hash(self.comment)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class SQLPrimaryKey:
+   """
+   Attributes:
+    - table_db
+    - table_name
+    - column_name
+    - key_seq
+    - pk_name
+    - enable_cstr
+    - validate_cstr
+    - rely_cstr
+    - catName
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'table_db', None, None, ), # 1
+     (2, TType.STRING, 'table_name', None, None, ), # 2
+     (3, TType.STRING, 'column_name', None, None, ), # 3
+     (4, TType.I32, 'key_seq', None, None, ), # 4
+     (5, TType.STRING, 'pk_name', None, None, ), # 5
+     (6, TType.BOOL, 'enable_cstr', None, None, ), # 6
+     (7, TType.BOOL, 'validate_cstr', None, None, ), # 7
+     (8, TType.BOOL, 'rely_cstr', None, None, ), # 8
+     (9, TType.STRING, 'catName', None, None, ), # 9
+   )
+ 
+   def __init__(self, table_db=None, table_name=None, column_name=None, key_seq=None, pk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None, catName=None,):
+     self.table_db = table_db
+     self.table_name = table_name
+     self.column_name = column_name
+     self.key_seq = key_seq
+     self.pk_name = pk_name
+     self.enable_cstr = enable_cstr
+     self.validate_cstr = validate_cstr
+     self.rely_cstr = rely_cstr
+     self.catName = catName
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.table_db = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.table_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.column_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.I32:
+           self.key_seq = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.STRING:
+           self.pk_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 6:
+         if ftype == TType.BOOL:
+           self.enable_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 7:
+         if ftype == TType.BOOL:
+           self.validate_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 8:
+         if ftype == TType.BOOL:
+           self.rely_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 9:
+         if ftype == TType.STRING:
+           self.catName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('SQLPrimaryKey')
+     if self.table_db is not None:
+       oprot.writeFieldBegin('table_db', TType.STRING, 1)
+       oprot.writeString(self.table_db)
+       oprot.writeFieldEnd()
+     if self.table_name is not None:
+       oprot.writeFieldBegin('table_name', TType.STRING, 2)
+       oprot.writeString(self.table_name)
+       oprot.writeFieldEnd()
+     if self.column_name is not None:
+       oprot.writeFieldBegin('column_name', TType.STRING, 3)
+       oprot.writeString(self.column_name)
+       oprot.writeFieldEnd()
+     if self.key_seq is not None:
+       oprot.writeFieldBegin('key_seq', TType.I32, 4)
+       oprot.writeI32(self.key_seq)
+       oprot.writeFieldEnd()
+     if self.pk_name is not None:
+       oprot.writeFieldBegin('pk_name', TType.STRING, 5)
+       oprot.writeString(self.pk_name)
+       oprot.writeFieldEnd()
+     if self.enable_cstr is not None:
+       oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6)
+       oprot.writeBool(self.enable_cstr)
+       oprot.writeFieldEnd()
+     if self.validate_cstr is not None:
+       oprot.writeFieldBegin('validate_cstr', TType.BOOL, 7)
+       oprot.writeBool(self.validate_cstr)
+       oprot.writeFieldEnd()
+     if self.rely_cstr is not None:
+       oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8)
+       oprot.writeBool(self.rely_cstr)
+       oprot.writeFieldEnd()
+     if self.catName is not None:
+       oprot.writeFieldBegin('catName', TType.STRING, 9)
+       oprot.writeString(self.catName)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.table_db)
+     value = (value * 31) ^ hash(self.table_name)
+     value = (value * 31) ^ hash(self.column_name)
+     value = (value * 31) ^ hash(self.key_seq)
+     value = (value * 31) ^ hash(self.pk_name)
+     value = (value * 31) ^ hash(self.enable_cstr)
+     value = (value * 31) ^ hash(self.validate_cstr)
+     value = (value * 31) ^ hash(self.rely_cstr)
+     value = (value * 31) ^ hash(self.catName)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class SQLForeignKey:
+   """
+   Attributes:
+    - pktable_db
+    - pktable_name
+    - pkcolumn_name
+    - fktable_db
+    - fktable_name
+    - fkcolumn_name
+    - key_seq
+    - update_rule
+    - delete_rule
+    - fk_name
+    - pk_name
+    - enable_cstr
+    - validate_cstr
+    - rely_cstr
+    - catName
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'pktable_db', None, None, ), # 1
+     (2, TType.STRING, 'pktable_name', None, None, ), # 2
+     (3, TType.STRING, 'pkcolumn_name', None, None, ), # 3
+     (4, TType.STRING, 'fktable_db', None, None, ), # 4
+     (5, TType.STRING, 'fktable_name', None, None, ), # 5
+     (6, TType.STRING, 'fkcolumn_name', None, None, ), # 6
+     (7, TType.I32, 'key_seq', None, None, ), # 7
+     (8, TType.I32, 'update_rule', None, None, ), # 8
+     (9, TType.I32, 'delete_rule', None, None, ), # 9
+     (10, TType.STRING, 'fk_name', None, None, ), # 10
+     (11, TType.STRING, 'pk_name', None, None, ), # 11
+     (12, TType.BOOL, 'enable_cstr', None, None, ), # 12
+     (13, TType.BOOL, 'validate_cstr', None, None, ), # 13
+     (14, TType.BOOL, 'rely_cstr', None, None, ), # 14
+     (15, TType.STRING, 'catName', None, None, ), # 15
+   )
+ 
+   def __init__(self, pktable_db=None, pktable_name=None, pkcolumn_name=None, fktable_db=None, fktable_name=None, fkcolumn_name=None, key_seq=None, update_rule=None, delete_rule=None, fk_name=None, pk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None, catName=None,):
+     self.pktable_db = pktable_db
+     self.pktable_name = pktable_name
+     self.pkcolumn_name = pkcolumn_name
+     self.fktable_db = fktable_db
+     self.fktable_name = fktable_name
+     self.fkcolumn_name = fkcolumn_name
+     self.key_seq = key_seq
+     self.update_rule = update_rule
+     self.delete_rule = delete_rule
+     self.fk_name = fk_name
+     self.pk_name = pk_name
+     self.enable_cstr = enable_cstr
+     self.validate_cstr = validate_cstr
+     self.rely_cstr = rely_cstr
+     self.catName = catName
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.pktable_db = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.pktable_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.pkcolumn_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.STRING:
+           self.fktable_db = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.STRING:
+           self.fktable_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 6:
+         if ftype == TType.STRING:
+           self.fkcolumn_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 7:
+         if ftype == TType.I32:
+           self.key_seq = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 8:
+         if ftype == TType.I32:
+           self.update_rule = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 9:
+         if ftype == TType.I32:
+           self.delete_rule = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 10:
+         if ftype == TType.STRING:
+           self.fk_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 11:
+         if ftype == TType.STRING:
+           self.pk_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 12:
+         if ftype == TType.BOOL:
+           self.enable_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 13:
+         if ftype == TType.BOOL:
+           self.validate_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 14:
+         if ftype == TType.BOOL:
+           self.rely_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 15:
+         if ftype == TType.STRING:
+           self.catName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('SQLForeignKey')
+     if self.pktable_db is not None:
+       oprot.writeFieldBegin('pktable_db', TType.STRING, 1)
+       oprot.writeString(self.pktable_db)
+       oprot.writeFieldEnd()
+     if self.pktable_name is not None:
+       oprot.writeFieldBegin('pktable_name', TType.STRING, 2)
+       oprot.writeString(self.pktable_name)
+       oprot.writeFieldEnd()
+     if self.pkcolumn_name is not None:
+       oprot.writeFieldBegin('pkcolumn_name', TType.STRING, 3)
+       oprot.writeString(self.pkcolumn_name)
+       oprot.writeFieldEnd()
+     if self.fktable_db is not None:
+       oprot.writeFieldBegin('fktable_db', TType.STRING, 4)
+       oprot.writeString(self.fktable_db)
+       oprot.writeFieldEnd()
+     if self.fktable_name is not None:
+       oprot.writeFieldBegin('fktable_name', TType.STRING, 5)
+       oprot.writeString(self.fktable_name)
+       oprot.writeFieldEnd()
+     if self.fkcolumn_name is not None:
+       oprot.writeFieldBegin('fkcolumn_name', TType.STRING, 6)
+       oprot.writeString(self.fkcolumn_name)
+       oprot.writeFieldEnd()
+     if self.key_seq is not None:
+       oprot.writeFieldBegin('key_seq', TType.I32, 7)
+       oprot.writeI32(self.key_seq)
+       oprot.writeFieldEnd()
+     if self.update_rule is not None:
+       oprot.writeFieldBegin('update_rule', TType.I32, 8)
+       oprot.writeI32(self.update_rule)
+       oprot.writeFieldEnd()
+     if self.delete_rule is not None:
+       oprot.writeFieldBegin('delete_rule', TType.I32, 9)
+       oprot.writeI32(self.delete_rule)
+       oprot.writeFieldEnd()
+     if self.fk_name is not None:
+       oprot.writeFieldBegin('fk_name', TType.STRING, 10)
+       oprot.writeString(self.fk_name)
+       oprot.writeFieldEnd()
+     if self.pk_name is not None:
+       oprot.writeFieldBegin('pk_name', TType.STRING, 11)
+       oprot.writeString(self.pk_name)
+       oprot.writeFieldEnd()
+     if self.enable_cstr is not None:
+       oprot.writeFieldBegin('enable_cstr', TType.BOOL, 12)
+       oprot.writeBool(self.enable_cstr)
+       oprot.writeFieldEnd()
+     if self.validate_cstr is not None:
+       oprot.writeFieldBegin('validate_cstr', TType.BOOL, 13)
+       oprot.writeBool(self.validate_cstr)
+       oprot.writeFieldEnd()
+     if self.rely_cstr is not None:
+       oprot.writeFieldBegin('rely_cstr', TType.BOOL, 14)
+       oprot.writeBool(self.rely_cstr)
+       oprot.writeFieldEnd()
+     if self.catName is not None:
+       oprot.writeFieldBegin('catName', TType.STRING, 15)
+       oprot.writeString(self.catName)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.pktable_db)
+     value = (value * 31) ^ hash(self.pktable_name)
+     value = (value * 31) ^ hash(self.pkcolumn_name)
+     value = (value * 31) ^ hash(self.fktable_db)
+     value = (value * 31) ^ hash(self.fktable_name)
+     value = (value * 31) ^ hash(self.fkcolumn_name)
+     value = (value * 31) ^ hash(self.key_seq)
+     value = (value * 31) ^ hash(self.update_rule)
+     value = (value * 31) ^ hash(self.delete_rule)
+     value = (value * 31) ^ hash(self.fk_name)
+     value = (value * 31) ^ hash(self.pk_name)
+     value = (value * 31) ^ hash(self.enable_cstr)
+     value = (value * 31) ^ hash(self.validate_cstr)
+     value = (value * 31) ^ hash(self.rely_cstr)
+     value = (value * 31) ^ hash(self.catName)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class SQLUniqueConstraint:
+   """
+   Attributes:
+    - catName
+    - table_db
+    - table_name
+    - column_name
+    - key_seq
+    - uk_name
+    - enable_cstr
+    - validate_cstr
+    - rely_cstr
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'catName', None, None, ), # 1
+     (2, TType.STRING, 'table_db', None, None, ), # 2
+     (3, TType.STRING, 'table_name', None, None, ), # 3
+     (4, TType.STRING, 'column_name', None, None, ), # 4
+     (5, TType.I32, 'key_seq', None, None, ), # 5
+     (6, TType.STRING, 'uk_name', None, None, ), # 6
+     (7, TType.BOOL, 'enable_cstr', None, None, ), # 7
+     (8, TType.BOOL, 'validate_cstr', None, None, ), # 8
+     (9, TType.BOOL, 'rely_cstr', None, None, ), # 9
+   )
+ 
+   def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, key_seq=None, uk_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,):
+     self.catName = catName
+     self.table_db = table_db
+     self.table_name = table_name
+     self.column_name = column_name
+     self.key_seq = key_seq
+     self.uk_name = uk_name
+     self.enable_cstr = enable_cstr
+     self.validate_cstr = validate_cstr
+     self.rely_cstr = rely_cstr
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.catName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.table_db = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.table_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.STRING:
+           self.column_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.I32:
+           self.key_seq = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 6:
+         if ftype == TType.STRING:
+           self.uk_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 7:
+         if ftype == TType.BOOL:
+           self.enable_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 8:
+         if ftype == TType.BOOL:
+           self.validate_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 9:
+         if ftype == TType.BOOL:
+           self.rely_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('SQLUniqueConstraint')
+     if self.catName is not None:
+       oprot.writeFieldBegin('catName', TType.STRING, 1)
+       oprot.writeString(self.catName)
+       oprot.writeFieldEnd()
+     if self.table_db is not None:
+       oprot.writeFieldBegin('table_db', TType.STRING, 2)
+       oprot.writeString(self.table_db)
+       oprot.writeFieldEnd()
+     if self.table_name is not None:
+       oprot.writeFieldBegin('table_name', TType.STRING, 3)
+       oprot.writeString(self.table_name)
+       oprot.writeFieldEnd()
+     if self.column_name is not None:
+       oprot.writeFieldBegin('column_name', TType.STRING, 4)
+       oprot.writeString(self.column_name)
+       oprot.writeFieldEnd()
+     if self.key_seq is not None:
+       oprot.writeFieldBegin('key_seq', TType.I32, 5)
+       oprot.writeI32(self.key_seq)
+       oprot.writeFieldEnd()
+     if self.uk_name is not None:
+       oprot.writeFieldBegin('uk_name', TType.STRING, 6)
+       oprot.writeString(self.uk_name)
+       oprot.writeFieldEnd()
+     if self.enable_cstr is not None:
+       oprot.writeFieldBegin('enable_cstr', TType.BOOL, 7)
+       oprot.writeBool(self.enable_cstr)
+       oprot.writeFieldEnd()
+     if self.validate_cstr is not None:
+       oprot.writeFieldBegin('validate_cstr', TType.BOOL, 8)
+       oprot.writeBool(self.validate_cstr)
+       oprot.writeFieldEnd()
+     if self.rely_cstr is not None:
+       oprot.writeFieldBegin('rely_cstr', TType.BOOL, 9)
+       oprot.writeBool(self.rely_cstr)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.catName)
+     value = (value * 31) ^ hash(self.table_db)
+     value = (value * 31) ^ hash(self.table_name)
+     value = (value * 31) ^ hash(self.column_name)
+     value = (value * 31) ^ hash(self.key_seq)
+     value = (value * 31) ^ hash(self.uk_name)
+     value = (value * 31) ^ hash(self.enable_cstr)
+     value = (value * 31) ^ hash(self.validate_cstr)
+     value = (value * 31) ^ hash(self.rely_cstr)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class SQLNotNullConstraint:
+   """
+   Attributes:
+    - catName
+    - table_db
+    - table_name
+    - column_name
+    - nn_name
+    - enable_cstr
+    - validate_cstr
+    - rely_cstr
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'catName', None, None, ), # 1
+     (2, TType.STRING, 'table_db', None, None, ), # 2
+     (3, TType.STRING, 'table_name', None, None, ), # 3
+     (4, TType.STRING, 'column_name', None, None, ), # 4
+     (5, TType.STRING, 'nn_name', None, None, ), # 5
+     (6, TType.BOOL, 'enable_cstr', None, None, ), # 6
+     (7, TType.BOOL, 'validate_cstr', None, None, ), # 7
+     (8, TType.BOOL, 'rely_cstr', None, None, ), # 8
+   )
+ 
+   def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, nn_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,):
+     self.catName = catName
+     self.table_db = table_db
+     self.table_name = table_name
+     self.column_name = column_name
+     self.nn_name = nn_name
+     self.enable_cstr = enable_cstr
+     self.validate_cstr = validate_cstr
+     self.rely_cstr = rely_cstr
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.catName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.table_db = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.table_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.STRING:
+           self.column_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.STRING:
+           self.nn_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 6:
+         if ftype == TType.BOOL:
+           self.enable_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 7:
+         if ftype == TType.BOOL:
+           self.validate_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 8:
+         if ftype == TType.BOOL:
+           self.rely_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('SQLNotNullConstraint')
+     if self.catName is not None:
+       oprot.writeFieldBegin('catName', TType.STRING, 1)
+       oprot.writeString(self.catName)
+       oprot.writeFieldEnd()
+     if self.table_db is not None:
+       oprot.writeFieldBegin('table_db', TType.STRING, 2)
+       oprot.writeString(self.table_db)
+       oprot.writeFieldEnd()
+     if self.table_name is not None:
+       oprot.writeFieldBegin('table_name', TType.STRING, 3)
+       oprot.writeString(self.table_name)
+       oprot.writeFieldEnd()
+     if self.column_name is not None:
+       oprot.writeFieldBegin('column_name', TType.STRING, 4)
+       oprot.writeString(self.column_name)
+       oprot.writeFieldEnd()
+     if self.nn_name is not None:
+       oprot.writeFieldBegin('nn_name', TType.STRING, 5)
+       oprot.writeString(self.nn_name)
+       oprot.writeFieldEnd()
+     if self.enable_cstr is not None:
+       oprot.writeFieldBegin('enable_cstr', TType.BOOL, 6)
+       oprot.writeBool(self.enable_cstr)
+       oprot.writeFieldEnd()
+     if self.validate_cstr is not None:
+       oprot.writeFieldBegin('validate_cstr', TType.BOOL, 7)
+       oprot.writeBool(self.validate_cstr)
+       oprot.writeFieldEnd()
+     if self.rely_cstr is not None:
+       oprot.writeFieldBegin('rely_cstr', TType.BOOL, 8)
+       oprot.writeBool(self.rely_cstr)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.catName)
+     value = (value * 31) ^ hash(self.table_db)
+     value = (value * 31) ^ hash(self.table_name)
+     value = (value * 31) ^ hash(self.column_name)
+     value = (value * 31) ^ hash(self.nn_name)
+     value = (value * 31) ^ hash(self.enable_cstr)
+     value = (value * 31) ^ hash(self.validate_cstr)
+     value = (value * 31) ^ hash(self.rely_cstr)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class SQLDefaultConstraint:
+   """
+   Attributes:
+    - catName
+    - table_db
+    - table_name
+    - column_name
+    - default_value
+    - dc_name
+    - enable_cstr
+    - validate_cstr
+    - rely_cstr
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'catName', None, None, ), # 1
+     (2, TType.STRING, 'table_db', None, None, ), # 2
+     (3, TType.STRING, 'table_name', None, None, ), # 3
+     (4, TType.STRING, 'column_name', None, None, ), # 4
+     (5, TType.STRING, 'default_value', None, None, ), # 5
+     (6, TType.STRING, 'dc_name', None, None, ), # 6
+     (7, TType.BOOL, 'enable_cstr', None, None, ), # 7
+     (8, TType.BOOL, 'validate_cstr', None, None, ), # 8
+     (9, TType.BOOL, 'rely_cstr', None, None, ), # 9
+   )
+ 
+   def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, default_value=None, dc_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,):
+     self.catName = catName
+     self.table_db = table_db
+     self.table_name = table_name
+     self.column_name = column_name
+     self.default_value = default_value
+     self.dc_name = dc_name
+     self.enable_cstr = enable_cstr
+     self.validate_cstr = validate_cstr
+     self.rely_cstr = rely_cstr
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.catName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.table_db = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.table_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.STRING:
+           self.column_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.STRING:
+           self.default_value = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 6:
+         if ftype == TType.STRING:
+           self.dc_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 7:
+         if ftype == TType.BOOL:
+           self.enable_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 8:
+         if ftype == TType.BOOL:
+           self.validate_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 9:
+         if ftype == TType.BOOL:
+           self.rely_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('SQLDefaultConstraint')
+     if self.catName is not None:
+       oprot.writeFieldBegin('catName', TType.STRING, 1)
+       oprot.writeString(self.catName)
+       oprot.writeFieldEnd()
+     if self.table_db is not None:
+       oprot.writeFieldBegin('table_db', TType.STRING, 2)
+       oprot.writeString(self.table_db)
+       oprot.writeFieldEnd()
+     if self.table_name is not None:
+       oprot.writeFieldBegin('table_name', TType.STRING, 3)
+       oprot.writeString(self.table_name)
+       oprot.writeFieldEnd()
+     if self.column_name is not None:
+       oprot.writeFieldBegin('column_name', TType.STRING, 4)
+       oprot.writeString(self.column_name)
+       oprot.writeFieldEnd()
+     if self.default_value is not None:
+       oprot.writeFieldBegin('default_value', TType.STRING, 5)
+       oprot.writeString(self.default_value)
+       oprot.writeFieldEnd()
+     if self.dc_name is not None:
+       oprot.writeFieldBegin('dc_name', TType.STRING, 6)
+       oprot.writeString(self.dc_name)
+       oprot.writeFieldEnd()
+     if self.enable_cstr is not None:
+       oprot.writeFieldBegin('enable_cstr', TType.BOOL, 7)
+       oprot.writeBool(self.enable_cstr)
+       oprot.writeFieldEnd()
+     if self.validate_cstr is not None:
+       oprot.writeFieldBegin('validate_cstr', TType.BOOL, 8)
+       oprot.writeBool(self.validate_cstr)
+       oprot.writeFieldEnd()
+     if self.rely_cstr is not None:
+       oprot.writeFieldBegin('rely_cstr', TType.BOOL, 9)
+       oprot.writeBool(self.rely_cstr)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.catName)
+     value = (value * 31) ^ hash(self.table_db)
+     value = (value * 31) ^ hash(self.table_name)
+     value = (value * 31) ^ hash(self.column_name)
+     value = (value * 31) ^ hash(self.default_value)
+     value = (value * 31) ^ hash(self.dc_name)
+     value = (value * 31) ^ hash(self.enable_cstr)
+     value = (value * 31) ^ hash(self.validate_cstr)
+     value = (value * 31) ^ hash(self.rely_cstr)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class SQLCheckConstraint:
+   """
+   Attributes:
+    - catName
+    - table_db
+    - table_name
+    - column_name
+    - check_expression
+    - dc_name
+    - enable_cstr
+    - validate_cstr
+    - rely_cstr
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'catName', None, None, ), # 1
+     (2, TType.STRING, 'table_db', None, None, ), # 2
+     (3, TType.STRING, 'table_name', None, None, ), # 3
+     (4, TType.STRING, 'column_name', None, None, ), # 4
+     (5, TType.STRING, 'check_expression', None, None, ), # 5
+     (6, TType.STRING, 'dc_name', None, None, ), # 6
+     (7, TType.BOOL, 'enable_cstr', None, None, ), # 7
+     (8, TType.BOOL, 'validate_cstr', None, None, ), # 8
+     (9, TType.BOOL, 'rely_cstr', None, None, ), # 9
+   )
+ 
+   def __init__(self, catName=None, table_db=None, table_name=None, column_name=None, check_expression=None, dc_name=None, enable_cstr=None, validate_cstr=None, rely_cstr=None,):
+     self.catName = catName
+     self.table_db = table_db
+     self.table_name = table_name
+     self.column_name = column_name
+     self.check_expression = check_expression
+     self.dc_name = dc_name
+     self.enable_cstr = enable_cstr
+     self.validate_cstr = validate_cstr
+     self.rely_cstr = rely_cstr
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.catName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.table_db = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.table_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.STRING:
+           self.column_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.STRING:
+           self.check_expression = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 6:
+         if ftype == TType.STRING:
+           self.dc_name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 7:
+         if ftype == TType.BOOL:
+           self.enable_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 8:
+         if ftype == TType.BOOL:
+           self.validate_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 9:
+         if ftype == TType.BOOL:
+           self.rely_cstr = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('SQLCheckConstraint')
+     if self.catName is not None:
+       oprot.writeFieldBegin('catName', TType.STRING, 1)
+       oprot.writeString(self.catName)
+       oprot.writeFieldEnd()
+     if self.table_db is not None:
+       oprot.writeFieldBegin('table_db', TType.STRING, 2)
+       oprot.writeString(self.table_db)
+       oprot.writeFieldEnd()
+     if self.table_name is not None:
+       oprot.writeFieldBegin('table_name', TType.STRING, 3)
+       oprot.writeString(self.table_name)
+       oprot.writeFieldEnd()
+     if self.column_name is not None:
+       oprot.writeFieldBegin('column_name', TType.STRING, 4)
+       oprot.writeString(self.column_name)
+       oprot.writeFieldEnd()
+     if self.check_expression is not None:
+       oprot.writeFieldBegin('check_expression', TType.STRING, 5)
+       oprot.writeString(self.check_expression)
+       oprot.writeFieldEnd()
+     if self.dc_name is not None:
+       oprot.writeFieldBegin('dc_name', TType.STRING, 6)
+       oprot.writeString(self.dc_name)
+       oprot.writeFieldEnd()
+     if self.enable_cstr is not None:
+       oprot.writeFieldBegin('enable_cstr', TType.BOOL, 7)
+       oprot.writeBool(self.enable_cstr)
+       oprot.writeFieldEnd()
+     if self.validate_cstr is not None:
+       oprot.writeFieldBegin('validate_cstr', TType.BOOL, 8)
+       oprot.writeBool(self.validate_cstr)
+       oprot.writeFieldEnd()
+     if self.rely_cstr is not None:
+       oprot.writeFieldBegin('rely_cstr', TType.BOOL, 9)
+       oprot.writeBool(self.rely_cstr)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.catName)
+     value = (value * 31) ^ hash(self.table_db)
+     value = (value * 31) ^ hash(self.table_name)
+     value = (value * 31) ^ hash(self.column_name)
+     value = (value * 31) ^ hash(self.check_expression)
+     value = (value * 31) ^ hash(self.dc_name)
+     value = (value * 31) ^ hash(self.enable_cstr)
+     value = (value * 31) ^ hash(self.validate_cstr)
+     value = (value * 31) ^ hash(self.rely_cstr)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class Type:
+   """
+   Attributes:
+    - name
+    - type1
+    - type2
+    - fields
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'name', None, None, ), # 1
+     (2, TType.STRING, 'type1', None, None, ), # 2
+     (3, TType.STRING, 'type2', None, None, ), # 3
+     (4, TType.LIST, 'fields', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 4
+   )
+ 
+   def __init__(self, name=None, type1=None, type2=None, fields=None,):
+     self.name = name
+     self.type1 = type1
+     self.type2 = type2
+     self.fields = fields
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.name = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.type1 = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.type2 = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.LIST:
+           self.fields = []
+           (_etype3, _size0) = iprot.readListBegin()
+           for _i4 in xrange(_size0):
+             _elem5 = FieldSchema()
+             _elem5.read(iprot)
+             self.fields.append(_elem5)
+           iprot.readListEnd()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('Type')
+     if self.name is not None:
+       oprot.writeFieldBegin('name', TType.STRING, 1)
+       oprot.writeString(self.name)
+       oprot.writeFieldEnd()
+     if self.type1 is not None:
+       oprot.writeFieldBegin('type1', TType.STRING, 2)
+       oprot.writeString(self.type1)
+       oprot.writeFieldEnd()
+     if self.type2 is not None:
+       oprot.writeFieldBegin('type2', TType.STRING, 3)
+       oprot.writeString(self.type2)
+       oprot.writeFieldEnd()
+     if self.fields is not None:
+       oprot.writeFieldBegin('fields', TType.LIST, 4)
+       oprot.writeListBegin(TType.STRUCT, len(self.fields))
+       for iter6 in self.fields:
+         iter6.write(oprot)
+       oprot.writeListEnd()
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.name)
+     value = (value * 31) ^ hash(self.type1)
+     value = (value * 31) ^ hash(self.type2)
+     value = (value * 31) ^ hash(self.fields)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class HiveObjectRef:
+   """
+   Attributes:
+    - objectType
+    - dbName
+    - objectName
+    - partValues
+    - columnName
+    - catName
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.I32, 'objectType', None, None, ), # 1
+     (2, TType.STRING, 'dbName', None, None, ), # 2
+     (3, TType.STRING, 'objectName', None, None, ), # 3
+     (4, TType.LIST, 'partValues', (TType.STRING,None), None, ), # 4
+     (5, TType.STRING, 'columnName', None, None, ), # 5
+     (6, TType.STRING, 'catName', None, None, ), # 6
+   )
+ 
+   def __init__(self, objectType=None, dbName=None, objectName=None, partValues=None, columnName=None, catName=None,):
+     self.objectType = objectType
+     self.dbName = dbName
+     self.objectName = objectName
+     self.partValues = partValues
+     self.columnName = columnName
+     self.catName = catName
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.I32:
+           self.objectType = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.dbName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.objectName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.LIST:
+           self.partValues = []
+           (_etype10, _size7) = iprot.readListBegin()
+           for _i11 in xrange(_size7):
+             _elem12 = iprot.readString()
+             self.partValues.append(_elem12)
+           iprot.readListEnd()
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.STRING:
+           self.columnName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 6:
+         if ftype == TType.STRING:
+           self.catName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('HiveObjectRef')
+     if self.objectType is not None:
+       oprot.writeFieldBegin('objectType', TType.I32, 1)
+       oprot.writeI32(self.objectType)
+       oprot.writeFieldEnd()
+     if self.dbName is not None:
+       oprot.writeFieldBegin('dbName', TType.STRING, 2)
+       oprot.writeString(self.dbName)
+       oprot.writeFieldEnd()
+     if self.objectName is not None:
+       oprot.writeFieldBegin('objectName', TType.STRING, 3)
+       oprot.writeString(self.objectName)
+       oprot.writeFieldEnd()
+     if self.partValues is not None:
+       oprot.writeFieldBegin('partValues', TType.LIST, 4)
+       oprot.writeListBegin(TType.STRING, len(self.partValues))
+       for iter13 in self.partValues:
+         oprot.writeString(iter13)
+       oprot.writeListEnd()
+       oprot.writeFieldEnd()
+     if self.columnName is not None:
+       oprot.writeFieldBegin('columnName', TType.STRING, 5)
+       oprot.writeString(self.columnName)
+       oprot.writeFieldEnd()
+     if self.catName is not None:
+       oprot.writeFieldBegin('catName', TType.STRING, 6)
+       oprot.writeString(self.catName)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.objectType)
+     value = (value * 31) ^ hash(self.dbName)
+     value = (value * 31) ^ hash(self.objectName)
+     value = (value * 31) ^ hash(self.partValues)
+     value = (value * 31) ^ hash(self.columnName)
+     value = (value * 31) ^ hash(self.catName)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class PrivilegeGrantInfo:
+   """
+   Attributes:
+    - privilege
+    - createTime
+    - grantor
+    - grantorType
+    - grantOption
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'privilege', None, None, ), # 1
+     (2, TType.I32, 'createTime', None, None, ), # 2
+     (3, TType.STRING, 'grantor', None, None, ), # 3
+     (4, TType.I32, 'grantorType', None, None, ), # 4
+     (5, TType.BOOL, 'grantOption', None, None, ), # 5
+   )
+ 
+   def __init__(self, privilege=None, createTime=None, grantor=None, grantorType=None, grantOption=None,):
+     self.privilege = privilege
+     self.createTime = createTime
+     self.grantor = grantor
+     self.grantorType = grantorType
+     self.grantOption = grantOption
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.privilege = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.I32:
+           self.createTime = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.grantor = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.I32:
+           self.grantorType = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.BOOL:
+           self.grantOption = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('PrivilegeGrantInfo')
+     if self.privilege is not None:
+       oprot.writeFieldBegin('privilege', TType.STRING, 1)
+       oprot.writeString(self.privilege)
+       oprot.writeFieldEnd()
+     if self.createTime is not None:
+       oprot.writeFieldBegin('createTime', TType.I32, 2)
+       oprot.writeI32(self.createTime)
+       oprot.writeFieldEnd()
+     if self.grantor is not None:
+       oprot.writeFieldBegin('grantor', TType.STRING, 3)
+       oprot.writeString(self.grantor)
+       oprot.writeFieldEnd()
+     if self.grantorType is not None:
+       oprot.writeFieldBegin('grantorType', TType.I32, 4)
+       oprot.writeI32(self.grantorType)
+       oprot.writeFieldEnd()
+     if self.grantOption is not None:
+       oprot.writeFieldBegin('grantOption', TType.BOOL, 5)
+       oprot.writeBool(self.grantOption)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.privilege)
+     value = (value * 31) ^ hash(self.createTime)
+     value = (value * 31) ^ hash(self.grantor)
+     value = (value * 31) ^ hash(self.grantorType)
+     value = (value * 31) ^ hash(self.grantOption)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class HiveObjectPrivilege:
+   """
+   Attributes:
+    - hiveObject
+    - principalName
+    - principalType
+    - grantInfo
+    - authorizer
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRUCT, 'hiveObject', (HiveObjectRef, HiveObjectRef.thrift_spec), None, ), # 1
+     (2, TType.STRING, 'principalName', None, None, ), # 2
+     (3, TType.I32, 'principalType', None, None, ), # 3
+     (4, TType.STRUCT, 'grantInfo', (PrivilegeGrantInfo, PrivilegeGrantInfo.thrift_spec), None, ), # 4
+     (5, TType.STRING, 'authorizer', None, None, ), # 5
+   )
+ 
+   def __init__(self, hiveObject=None, principalName=None, principalType=None, grantInfo=None, authorizer=None,):
+     self.hiveObject = hiveObject
+     self.principalName = principalName
+     self.principalType = principalType
+     self.grantInfo = grantInfo
+     self.authorizer = authorizer
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRUCT:
+           self.hiveObject = HiveObjectRef()
+           self.hiveObject.read(iprot)
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.principalName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.I32:
+           self.principalType = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.STRUCT:
+           self.grantInfo = PrivilegeGrantInfo()
+           self.grantInfo.read(iprot)
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.STRING:
+           self.authorizer = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('HiveObjectPrivilege')
+     if self.hiveObject is not None:
+       oprot.writeFieldBegin('hiveObject', TType.STRUCT, 1)
+       self.hiveObject.write(oprot)
+       oprot.writeFieldEnd()
+     if self.principalName is not None:
+       oprot.writeFieldBegin('principalName', TType.STRING, 2)
+       oprot.writeString(self.principalName)
+       oprot.writeFieldEnd()
+     if self.principalType is not None:
+       oprot.writeFieldBegin('principalType', TType.I32, 3)
+       oprot.writeI32(self.principalType)
+       oprot.writeFieldEnd()
+     if self.grantInfo is not None:
+       oprot.writeFieldBegin('grantInfo', TType.STRUCT, 4)
+       self.grantInfo.write(oprot)
+       oprot.writeFieldEnd()
+     if self.authorizer is not None:
+       oprot.writeFieldBegin('authorizer', TType.STRING, 5)
+       oprot.writeString(self.authorizer)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.hiveObject)
+     value = (value * 31) ^ hash(self.principalName)
+     value = (value * 31) ^ hash(self.principalType)
+     value = (value * 31) ^ hash(self.grantInfo)
+     value = (value * 31) ^ hash(self.authorizer)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class PrivilegeBag:
+   """
+   Attributes:
+    - privileges
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.LIST, 'privileges', (TType.STRUCT,(HiveObjectPrivilege, HiveObjectPrivilege.thrift_spec)), None, ), # 1
+   )
+ 
+   def __init__(self, privileges=None,):
+     self.privileges = privileges
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.LIST:
+           self.privileges = []
+           (_etype17, _size14) = iprot.readListBegin()
+           for _i18 in xrange(_size14):
+             _elem19 = HiveObjectPrivilege()
+             _elem19.read(iprot)
+             self.privileges.append(_elem19)
+           iprot.readListEnd()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('PrivilegeBag')
+     if self.privileges is not None:
+       oprot.writeFieldBegin('privileges', TType.LIST, 1)
+       oprot.writeListBegin(TType.STRUCT, len(self.privileges))
+       for iter20 in self.privileges:
+         iter20.write(oprot)
+       oprot.writeListEnd()
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.privileges)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class PrincipalPrivilegeSet:
+   """
+   Attributes:
+    - userPrivileges
+    - groupPrivileges
+    - rolePrivileges
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.MAP, 'userPrivileges', (TType.STRING,None,TType.LIST,(TType.STRUCT,(PrivilegeGrantInfo, PrivilegeGrantInfo.thrift_spec))), None, ), # 1
+     (2, TType.MAP, 'groupPrivileges', (TType.STRING,None,TType.LIST,(TType.STRUCT,(PrivilegeGrantInfo, PrivilegeGrantInfo.thrift_spec))), None, ), # 2
+     (3, TType.MAP, 'rolePrivileges', (TType.STRING,None,TType.LIST,(TType.STRUCT,(PrivilegeGrantInfo, PrivilegeGrantInfo.thrift_spec))), None, ), # 3
+   )
+ 
+   def __init__(self, userPrivileges=None, groupPrivileges=None, rolePrivileges=None,):
+     self.userPrivileges = userPrivileges
+     self.groupPrivileges = groupPrivileges
+     self.rolePrivileges = rolePrivileges
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.MAP:
+           self.userPrivileges = {}
+           (_ktype22, _vtype23, _size21 ) = iprot.readMapBegin()
+           for _i25 in xrange(_size21):
+             _key26 = iprot.readString()
+             _val27 = []
+             (_etype31, _size28) = iprot.readListBegin()
+             for _i32 in xrange(_size28):
+               _elem33 = PrivilegeGrantInfo()
+               _elem33.read(iprot)
+               _val27.append(_elem33)
+             iprot.readListEnd()
+             self.userPrivileges[_key26] = _val27
+           iprot.readMapEnd()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.MAP:
+           self.groupPrivileges = {}
+           (_ktype35, _vtype36, _size34 ) = iprot.readMapBegin()
+           for _i38 in xrange(_size34):
+             _key39 = iprot.readString()
+             _val40 = []
+             (_etype44, _size41) = iprot.readListBegin()
+             for _i45 in xrange(_size41):
+               _elem46 = PrivilegeGrantInfo()
+               _elem46.read(iprot)
+               _val40.append(_elem46)
+             iprot.readListEnd()
+             self.groupPrivileges[_key39] = _val40
+           iprot.readMapEnd()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.MAP:
+           self.rolePrivileges = {}
+           (_ktype48, _vtype49, _size47 ) = iprot.readMapBegin()
+           for _i51 in xrange(_size47):
+             _key52 = iprot.readString()
+             _val53 = []
+             (_etype57, _size54) = iprot.readListBegin()
+             for _i58 in xrange(_size54):
+               _elem59 = PrivilegeGrantInfo()
+               _elem59.read(iprot)
+               _val53.append(_elem59)
+             iprot.readListEnd()
+             self.rolePrivileges[_key52] = _val53
+           iprot.readMapEnd()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('PrincipalPrivilegeSet')
+     if self.userPrivileges is not None:
+       oprot.writeFieldBegin('userPrivileges', TType.MAP, 1)
+       oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.userPrivileges))
+       for kiter60,viter61 in self.userPrivileges.items():
+         oprot.writeString(kiter60)
+         oprot.writeListBegin(TType.STRUCT, len(viter61))
+         for iter62 in viter61:
+           iter62.write(oprot)
+         oprot.writeListEnd()
+       oprot.writeMapEnd()
+       oprot.writeFieldEnd()
+     if self.groupPrivileges is not None:
+       oprot.writeFieldBegin('groupPrivileges', TType.MAP, 2)
+       oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.groupPrivileges))
+       for kiter63,viter64 in self.groupPrivileges.items():
+         oprot.writeString(kiter63)
+         oprot.writeListBegin(TType.STRUCT, len(viter64))
+         for iter65 in viter64:
+           iter65.write(oprot)
+         oprot.writeListEnd()
+       oprot.writeMapEnd()
+       oprot.writeFieldEnd()
+     if self.rolePrivileges is not None:
+       oprot.writeFieldBegin('rolePrivileges', TType.MAP, 3)
+       oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.rolePrivileges))
+       for kiter66,viter67 in self.rolePrivileges.items():
+         oprot.writeString(kiter66)
+         oprot.writeListBegin(TType.STRUCT, len(viter67))
+         for iter68 in viter67:
+           iter68.write(oprot)
+         oprot.writeListEnd()
+       oprot.writeMapEnd()
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.userPrivileges)
+     value = (value * 31) ^ hash(self.groupPrivileges)
+     value = (value * 31) ^ hash(self.rolePrivileges)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class GrantRevokePrivilegeRequest:
+   """
+   Attributes:
+    - requestType
+    - privileges
+    - revokeGrantOption
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.I32, 'requestType', None, None, ), # 1
+     (2, TType.STRUCT, 'privileges', (PrivilegeBag, PrivilegeBag.thrift_spec), None, ), # 2
+     (3, TType.BOOL, 'revokeGrantOption', None, None, ), # 3
+   )
+ 
+   def __init__(self, requestType=None, privileges=None, revokeGrantOption=None,):
+     self.requestType = requestType
+     self.privileges = privileges
+     self.revokeGrantOption = revokeGrantOption
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.I32:
+           self.requestType = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRUCT:
+           self.privileges = PrivilegeBag()
+           self.privileges.read(iprot)
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.BOOL:
+           self.revokeGrantOption = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('GrantRevokePrivilegeRequest')
+     if self.requestType is not None:
+       oprot.writeFieldBegin('requestType', TType.I32, 1)
+       oprot.writeI32(self.requestType)
+       oprot.writeFieldEnd()
+     if self.privileges is not None:
+       oprot.writeFieldBegin('privileges', TType.STRUCT, 2)
+       self.privileges.write(oprot)
+       oprot.writeFieldEnd()
+     if self.revokeGrantOption is not None:
+       oprot.writeFieldBegin('revokeGrantOption', TType.BOOL, 3)
+       oprot.writeBool(self.revokeGrantOption)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.requestType)
+     value = (value * 31) ^ hash(self.privileges)
+     value = (value * 31) ^ hash(self.revokeGrantOption)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class GrantRevokePrivilegeResponse:
+   """
+   Attributes:
+    - success
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.BOOL, 'success', None, None, ), # 1
+   )
+ 
+   def __init__(self, success=None,):
+     self.success = success
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.BOOL:
+           self.success = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('GrantRevokePrivilegeResponse')
+     if self.success is not None:
+       oprot.writeFieldBegin('success', TType.BOOL, 1)
+       oprot.writeBool(self.success)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.success)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class Role:
+   """
+   Attributes:
+    - roleName
+    - createTime
+    - ownerName
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'roleName', None, None, ), # 1
+     (2, TType.I32, 'createTime', None, None, ), # 2
+     (3, TType.STRING, 'ownerName', None, None, ), # 3
+   )
+ 
+   def __init__(self, roleName=None, createTime=None, ownerName=None,):
+     self.roleName = roleName
+     self.createTime = createTime
+     self.ownerName = ownerName
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.roleName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.I32:
+           self.createTime = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.STRING:
+           self.ownerName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('Role')
+     if self.roleName is not None:
+       oprot.writeFieldBegin('roleName', TType.STRING, 1)
+       oprot.writeString(self.roleName)
+       oprot.writeFieldEnd()
+     if self.createTime is not None:
+       oprot.writeFieldBegin('createTime', TType.I32, 2)
+       oprot.writeI32(self.createTime)
+       oprot.writeFieldEnd()
+     if self.ownerName is not None:
+       oprot.writeFieldBegin('ownerName', TType.STRING, 3)
+       oprot.writeString(self.ownerName)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.roleName)
+     value = (value * 31) ^ hash(self.createTime)
+     value = (value * 31) ^ hash(self.ownerName)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class RolePrincipalGrant:
+   """
+   Attributes:
+    - roleName
+    - principalName
+    - principalType
+    - grantOption
+    - grantTime
+    - grantorName
+    - grantorPrincipalType
+   """
+ 
+   thrift_spec = (
+     None, # 0
+     (1, TType.STRING, 'roleName', None, None, ), # 1
+     (2, TType.STRING, 'principalName', None, None, ), # 2
+     (3, TType.I32, 'principalType', None, None, ), # 3
+     (4, TType.BOOL, 'grantOption', None, None, ), # 4
+     (5, TType.I32, 'grantTime', None, None, ), # 5
+     (6, TType.STRING, 'grantorName', None, None, ), # 6
+     (7, TType.I32, 'grantorPrincipalType', None, None, ), # 7
+   )
+ 
+   def __init__(self, roleName=None, principalName=None, principalType=None, grantOption=None, grantTime=None, grantorName=None, grantorPrincipalType=None,):
+     self.roleName = roleName
+     self.principalName = principalName
+     self.principalType = principalType
+     self.grantOption = grantOption
+     self.grantTime = grantTime
+     self.grantorName = grantorName
+     self.grantorPrincipalType = grantorPrincipalType
+ 
+   def read(self, iprot):
+     if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
+       fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
+       return
+     iprot.readStructBegin()
+     while True:
+       (fname, ftype, fid) = iprot.readFieldBegin()
+       if ftype == TType.STOP:
+         break
+       if fid == 1:
+         if ftype == TType.STRING:
+           self.roleName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 2:
+         if ftype == TType.STRING:
+           self.principalName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 3:
+         if ftype == TType.I32:
+           self.principalType = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 4:
+         if ftype == TType.BOOL:
+           self.grantOption = iprot.readBool()
+         else:
+           iprot.skip(ftype)
+       elif fid == 5:
+         if ftype == TType.I32:
+           self.grantTime = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       elif fid == 6:
+         if ftype == TType.STRING:
+           self.grantorName = iprot.readString()
+         else:
+           iprot.skip(ftype)
+       elif fid == 7:
+         if ftype == TType.I32:
+           self.grantorPrincipalType = iprot.readI32()
+         else:
+           iprot.skip(ftype)
+       else:
+         iprot.skip(ftype)
+       iprot.readFieldEnd()
+     iprot.readStructEnd()
+ 
+   def write(self, oprot):
+     if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
+       oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
+       return
+     oprot.writeStructBegin('RolePrincipalGrant')
+     if self.roleName is not None:
+       oprot.writeFieldBegin('roleName', TType.STRING, 1)
+       oprot.writeString(self.roleName)
+       oprot.writeFieldEnd()
+     if self.principalName is not None:
+       oprot.writeFieldBegin('principalName', TType.STRING, 2)
+       oprot.writeString(self.principalName)
+       oprot.writeFieldEnd()
+     if self.principalType is not None:
+       oprot.writeFieldBegin('principalType', TType.I32, 3)
+       oprot.writeI32(self.principalType)
+       oprot.writeFieldEnd()
+     if self.grantOption is not None:
+       oprot.writeFieldBegin('grantOption', TType.BOOL, 4)
+       oprot.writeBool(self.grantOption)
+       oprot.writeFieldEnd()
+     if self.grantTime is not None:
+       oprot.writeFieldBegin('grantTime', TType.I32, 5)
+       oprot.writeI32(self.grantTime)
+       oprot.writeFieldEnd()
+     if self.grantorName is not None:
+       oprot.writeFieldBegin('grantorName', TType.STRING, 6)
+       oprot.writeString(self.grantorName)
+       oprot.writeFieldEnd()
+     if self.grantorPrincipalType is not None:
+       oprot.writeFieldBegin('grantorPrincipalType', TType.I32, 7)
+       oprot.writeI32(self.grantorPrincipalType)
+       oprot.writeFieldEnd()
+     oprot.writeFieldStop()
+     oprot.writeStructEnd()
+ 
+   def validate(self):
+     return
+ 
+ 
+   def __hash__(self):
+     value = 17
+     value = (value * 31) ^ hash(self.roleName)
+     value = (value * 31) ^ hash(self.principalName)
+     value = (value * 31) ^ hash(self.principalType)
+     value = (value * 31) ^ hash(self.grantOption)
+     value = (value * 31) ^ hash(self.grantTime)
+     value = (value * 31) ^ hash(self.grantorName)
+     value = (value * 31) ^ hash(self.grantorPrincipalType)
+     return value
+ 
+   def __repr__(self):
+     L = ['%s=%r' % (key, value)
+       for key, value in self.__dict__.iteritems()]
+     return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
+ 
+   def __eq__(self, other):
+     return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
+ 
+   def __ne__(self, other):
+     return not (self == other)
+ 
+ class GetRoleGrantsForPrincipalRequest:
+   """
+   Attributes:
+    - principal_name
+    - principal_type
+   """
+ 
+

<TRUNCATED>
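
The generated Python ttypes above are driven through the standard Thrift protocol/transport stack. As a minimal round-trip sketch (not part of the commit), assuming the gen-py output is importable as hive_metastore.ttypes -- the actual import path depends on the build layout, and the field values below are purely illustrative:

    from thrift.protocol import TBinaryProtocol
    from thrift.transport import TTransport

    # Assumed module path for the generated code; adjust to your build layout.
    from hive_metastore.ttypes import SQLDefaultConstraint

    # Every field is optional at the protocol level; unset (None) fields are
    # simply skipped by write().
    dc = SQLDefaultConstraint(
        catName='hive',
        table_db='default',
        table_name='t1',
        column_name='c1',
        default_value='0',
        dc_name='dc_c1',            # hypothetical constraint name
        enable_cstr=True,
        validate_cstr=False,
        rely_cstr=True,
    )

    # Serialize into an in-memory buffer using the plain binary protocol.
    out = TTransport.TMemoryBuffer()
    dc.write(TBinaryProtocol.TBinaryProtocol(out))
    payload = out.getvalue()

    # Deserialize the same bytes into a fresh object.
    dc2 = SQLDefaultConstraint()
    dc2.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(payload)))

    assert dc == dc2  # __eq__ compares __dict__, so the round trip is lossless

Note that the guards at the top of each generated read()/write() switch to the C-accelerated fastbinary codec automatically when the protocol is TBinaryProtocolAccelerated (and, for reads, the transport is a CReadableTransport), so the sketch above exercises the pure-Python path.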

[35/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
new file mode 100644
index 0000000..57eb5ef
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionRequest.java
@@ -0,0 +1,977 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CompactionRequest implements org.apache.thrift.TBase<CompactionRequest, CompactionRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CompactionRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CompactionRequest");
+
+  private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PARTITIONNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionname", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField RUNAS_FIELD_DESC = new org.apache.thrift.protocol.TField("runas", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField PROPERTIES_FIELD_DESC = new org.apache.thrift.protocol.TField("properties", org.apache.thrift.protocol.TType.MAP, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CompactionRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CompactionRequestTupleSchemeFactory());
+  }
+
+  private String dbname; // required
+  private String tablename; // required
+  private String partitionname; // optional
+  private CompactionType type; // required
+  private String runas; // optional
+  private Map<String,String> properties; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DBNAME((short)1, "dbname"),
+    TABLENAME((short)2, "tablename"),
+    PARTITIONNAME((short)3, "partitionname"),
+    /**
+     * 
+     * @see CompactionType
+     */
+    TYPE((short)4, "type"),
+    RUNAS((short)5, "runas"),
+    PROPERTIES((short)6, "properties");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DBNAME
+          return DBNAME;
+        case 2: // TABLENAME
+          return TABLENAME;
+        case 3: // PARTITIONNAME
+          return PARTITIONNAME;
+        case 4: // TYPE
+          return TYPE;
+        case 5: // RUNAS
+          return RUNAS;
+        case 6: // PROPERTIES
+          return PROPERTIES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.PARTITIONNAME,_Fields.RUNAS,_Fields.PROPERTIES};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLENAME, new org.apache.thrift.meta_data.FieldMetaData("tablename", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARTITIONNAME, new org.apache.thrift.meta_data.FieldMetaData("partitionname", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, CompactionType.class)));
+    tmpMap.put(_Fields.RUNAS, new org.apache.thrift.meta_data.FieldMetaData("runas", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PROPERTIES, new org.apache.thrift.meta_data.FieldMetaData("properties", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionRequest.class, metaDataMap);
+  }
+
+  public CompactionRequest() {
+  }
+
+  public CompactionRequest(
+    String dbname,
+    String tablename,
+    CompactionType type)
+  {
+    this();
+    this.dbname = dbname;
+    this.tablename = tablename;
+    this.type = type;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CompactionRequest(CompactionRequest other) {
+    if (other.isSetDbname()) {
+      this.dbname = other.dbname;
+    }
+    if (other.isSetTablename()) {
+      this.tablename = other.tablename;
+    }
+    if (other.isSetPartitionname()) {
+      this.partitionname = other.partitionname;
+    }
+    if (other.isSetType()) {
+      this.type = other.type;
+    }
+    if (other.isSetRunas()) {
+      this.runas = other.runas;
+    }
+    if (other.isSetProperties()) {
+      Map<String,String> __this__properties = new HashMap<String,String>(other.properties);
+      this.properties = __this__properties;
+    }
+  }
+
+  public CompactionRequest deepCopy() {
+    return new CompactionRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbname = null;
+    this.tablename = null;
+    this.partitionname = null;
+    this.type = null;
+    this.runas = null;
+    this.properties = null;
+  }
+
+  public String getDbname() {
+    return this.dbname;
+  }
+
+  public void setDbname(String dbname) {
+    this.dbname = dbname;
+  }
+
+  public void unsetDbname() {
+    this.dbname = null;
+  }
+
+  /** Returns true if field dbname is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbname() {
+    return this.dbname != null;
+  }
+
+  public void setDbnameIsSet(boolean value) {
+    if (!value) {
+      this.dbname = null;
+    }
+  }
+
+  public String getTablename() {
+    return this.tablename;
+  }
+
+  public void setTablename(String tablename) {
+    this.tablename = tablename;
+  }
+
+  public void unsetTablename() {
+    this.tablename = null;
+  }
+
+  /** Returns true if field tablename is set (has been assigned a value) and false otherwise */
+  public boolean isSetTablename() {
+    return this.tablename != null;
+  }
+
+  public void setTablenameIsSet(boolean value) {
+    if (!value) {
+      this.tablename = null;
+    }
+  }
+
+  public String getPartitionname() {
+    return this.partitionname;
+  }
+
+  public void setPartitionname(String partitionname) {
+    this.partitionname = partitionname;
+  }
+
+  public void unsetPartitionname() {
+    this.partitionname = null;
+  }
+
+  /** Returns true if field partitionname is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitionname() {
+    return this.partitionname != null;
+  }
+
+  public void setPartitionnameIsSet(boolean value) {
+    if (!value) {
+      this.partitionname = null;
+    }
+  }
+
+  /**
+   * 
+   * @see CompactionType
+   */
+  public CompactionType getType() {
+    return this.type;
+  }
+
+  /**
+   * 
+   * @see CompactionType
+   */
+  public void setType(CompactionType type) {
+    this.type = type;
+  }
+
+  public void unsetType() {
+    this.type = null;
+  }
+
+  /** Returns true if field type is set (has been assigned a value) and false otherwise */
+  public boolean isSetType() {
+    return this.type != null;
+  }
+
+  public void setTypeIsSet(boolean value) {
+    if (!value) {
+      this.type = null;
+    }
+  }
+
+  public String getRunas() {
+    return this.runas;
+  }
+
+  public void setRunas(String runas) {
+    this.runas = runas;
+  }
+
+  public void unsetRunas() {
+    this.runas = null;
+  }
+
+  /** Returns true if field runas is set (has been assigned a value) and false otherwise */
+  public boolean isSetRunas() {
+    return this.runas != null;
+  }
+
+  public void setRunasIsSet(boolean value) {
+    if (!value) {
+      this.runas = null;
+    }
+  }
+
+  public int getPropertiesSize() {
+    return (this.properties == null) ? 0 : this.properties.size();
+  }
+
+  public void putToProperties(String key, String val) {
+    if (this.properties == null) {
+      this.properties = new HashMap<String,String>();
+    }
+    this.properties.put(key, val);
+  }
+
+  public Map<String,String> getProperties() {
+    return this.properties;
+  }
+
+  public void setProperties(Map<String,String> properties) {
+    this.properties = properties;
+  }
+
+  public void unsetProperties() {
+    this.properties = null;
+  }
+
+  /** Returns true if field properties is set (has been assigned a value) and false otherwise */
+  public boolean isSetProperties() {
+    return this.properties != null;
+  }
+
+  public void setPropertiesIsSet(boolean value) {
+    if (!value) {
+      this.properties = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DBNAME:
+      if (value == null) {
+        unsetDbname();
+      } else {
+        setDbname((String)value);
+      }
+      break;
+
+    case TABLENAME:
+      if (value == null) {
+        unsetTablename();
+      } else {
+        setTablename((String)value);
+      }
+      break;
+
+    case PARTITIONNAME:
+      if (value == null) {
+        unsetPartitionname();
+      } else {
+        setPartitionname((String)value);
+      }
+      break;
+
+    case TYPE:
+      if (value == null) {
+        unsetType();
+      } else {
+        setType((CompactionType)value);
+      }
+      break;
+
+    case RUNAS:
+      if (value == null) {
+        unsetRunas();
+      } else {
+        setRunas((String)value);
+      }
+      break;
+
+    case PROPERTIES:
+      if (value == null) {
+        unsetProperties();
+      } else {
+        setProperties((Map<String,String>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DBNAME:
+      return getDbname();
+
+    case TABLENAME:
+      return getTablename();
+
+    case PARTITIONNAME:
+      return getPartitionname();
+
+    case TYPE:
+      return getType();
+
+    case RUNAS:
+      return getRunas();
+
+    case PROPERTIES:
+      return getProperties();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DBNAME:
+      return isSetDbname();
+    case TABLENAME:
+      return isSetTablename();
+    case PARTITIONNAME:
+      return isSetPartitionname();
+    case TYPE:
+      return isSetType();
+    case RUNAS:
+      return isSetRunas();
+    case PROPERTIES:
+      return isSetProperties();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CompactionRequest)
+      return this.equals((CompactionRequest)that);
+    return false;
+  }
+
+  public boolean equals(CompactionRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbname = true && this.isSetDbname();
+    boolean that_present_dbname = true && that.isSetDbname();
+    if (this_present_dbname || that_present_dbname) {
+      if (!(this_present_dbname && that_present_dbname))
+        return false;
+      if (!this.dbname.equals(that.dbname))
+        return false;
+    }
+
+    boolean this_present_tablename = true && this.isSetTablename();
+    boolean that_present_tablename = true && that.isSetTablename();
+    if (this_present_tablename || that_present_tablename) {
+      if (!(this_present_tablename && that_present_tablename))
+        return false;
+      if (!this.tablename.equals(that.tablename))
+        return false;
+    }
+
+    boolean this_present_partitionname = true && this.isSetPartitionname();
+    boolean that_present_partitionname = true && that.isSetPartitionname();
+    if (this_present_partitionname || that_present_partitionname) {
+      if (!(this_present_partitionname && that_present_partitionname))
+        return false;
+      if (!this.partitionname.equals(that.partitionname))
+        return false;
+    }
+
+    boolean this_present_type = true && this.isSetType();
+    boolean that_present_type = true && that.isSetType();
+    if (this_present_type || that_present_type) {
+      if (!(this_present_type && that_present_type))
+        return false;
+      if (!this.type.equals(that.type))
+        return false;
+    }
+
+    boolean this_present_runas = true && this.isSetRunas();
+    boolean that_present_runas = true && that.isSetRunas();
+    if (this_present_runas || that_present_runas) {
+      if (!(this_present_runas && that_present_runas))
+        return false;
+      if (!this.runas.equals(that.runas))
+        return false;
+    }
+
+    boolean this_present_properties = true && this.isSetProperties();
+    boolean that_present_properties = true && that.isSetProperties();
+    if (this_present_properties || that_present_properties) {
+      if (!(this_present_properties && that_present_properties))
+        return false;
+      if (!this.properties.equals(that.properties))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbname = true && (isSetDbname());
+    list.add(present_dbname);
+    if (present_dbname)
+      list.add(dbname);
+
+    boolean present_tablename = true && (isSetTablename());
+    list.add(present_tablename);
+    if (present_tablename)
+      list.add(tablename);
+
+    boolean present_partitionname = true && (isSetPartitionname());
+    list.add(present_partitionname);
+    if (present_partitionname)
+      list.add(partitionname);
+
+    boolean present_type = true && (isSetType());
+    list.add(present_type);
+    if (present_type)
+      list.add(type.getValue());
+
+    boolean present_runas = true && (isSetRunas());
+    list.add(present_runas);
+    if (present_runas)
+      list.add(runas);
+
+    boolean present_properties = true && (isSetProperties());
+    list.add(present_properties);
+    if (present_properties)
+      list.add(properties);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CompactionRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, other.dbname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTablename()).compareTo(other.isSetTablename());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTablename()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablename, other.tablename);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartitionname()).compareTo(other.isSetPartitionname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitionname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionname, other.partitionname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRunas()).compareTo(other.isSetRunas());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRunas()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.runas, other.runas);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetProperties()).compareTo(other.isSetProperties());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetProperties()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.properties, other.properties);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CompactionRequest(");
+    boolean first = true;
+
+    sb.append("dbname:");
+    if (this.dbname == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbname);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tablename:");
+    if (this.tablename == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tablename);
+    }
+    first = false;
+    if (isSetPartitionname()) {
+      if (!first) sb.append(", ");
+      sb.append("partitionname:");
+      if (this.partitionname == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partitionname);
+      }
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("type:");
+    if (this.type == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.type);
+    }
+    first = false;
+    if (isSetRunas()) {
+      if (!first) sb.append(", ");
+      sb.append("runas:");
+      if (this.runas == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.runas);
+      }
+      first = false;
+    }
+    if (isSetProperties()) {
+      if (!first) sb.append(", ");
+      sb.append("properties:");
+      if (this.properties == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.properties);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbname()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbname' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTablename()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tablename' is unset! Struct:" + toString());
+    }
+
+    if (!isSetType()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'type' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CompactionRequestStandardSchemeFactory implements SchemeFactory {
+    public CompactionRequestStandardScheme getScheme() {
+      return new CompactionRequestStandardScheme();
+    }
+  }
+
+  private static class CompactionRequestStandardScheme extends StandardScheme<CompactionRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DBNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbname = iprot.readString();
+              struct.setDbnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TABLENAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tablename = iprot.readString();
+              struct.setTablenameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PARTITIONNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.partitionname = iprot.readString();
+              struct.setPartitionnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32());
+              struct.setTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // RUNAS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.runas = iprot.readString();
+              struct.setRunasIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // PROPERTIES
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map690 = iprot.readMapBegin();
+                struct.properties = new HashMap<String,String>(2*_map690.size);
+                String _key691;
+                String _val692;
+                for (int _i693 = 0; _i693 < _map690.size; ++_i693)
+                {
+                  _key691 = iprot.readString();
+                  _val692 = iprot.readString();
+                  struct.properties.put(_key691, _val692);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setPropertiesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbname != null) {
+        oprot.writeFieldBegin(DBNAME_FIELD_DESC);
+        oprot.writeString(struct.dbname);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tablename != null) {
+        oprot.writeFieldBegin(TABLENAME_FIELD_DESC);
+        oprot.writeString(struct.tablename);
+        oprot.writeFieldEnd();
+      }
+      if (struct.partitionname != null) {
+        if (struct.isSetPartitionname()) {
+          oprot.writeFieldBegin(PARTITIONNAME_FIELD_DESC);
+          oprot.writeString(struct.partitionname);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.type != null) {
+        oprot.writeFieldBegin(TYPE_FIELD_DESC);
+        oprot.writeI32(struct.type.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.runas != null) {
+        if (struct.isSetRunas()) {
+          oprot.writeFieldBegin(RUNAS_FIELD_DESC);
+          oprot.writeString(struct.runas);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.properties != null) {
+        if (struct.isSetProperties()) {
+          oprot.writeFieldBegin(PROPERTIES_FIELD_DESC);
+          {
+            oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size()));
+            for (Map.Entry<String, String> _iter694 : struct.properties.entrySet())
+            {
+              oprot.writeString(_iter694.getKey());
+              oprot.writeString(_iter694.getValue());
+            }
+            oprot.writeMapEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CompactionRequestTupleSchemeFactory implements SchemeFactory {
+    public CompactionRequestTupleScheme getScheme() {
+      return new CompactionRequestTupleScheme();
+    }
+  }
+
+  private static class CompactionRequestTupleScheme extends TupleScheme<CompactionRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CompactionRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbname);
+      oprot.writeString(struct.tablename);
+      oprot.writeI32(struct.type.getValue());
+      BitSet optionals = new BitSet();
+      if (struct.isSetPartitionname()) {
+        optionals.set(0);
+      }
+      if (struct.isSetRunas()) {
+        optionals.set(1);
+      }
+      if (struct.isSetProperties()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetPartitionname()) {
+        oprot.writeString(struct.partitionname);
+      }
+      if (struct.isSetRunas()) {
+        oprot.writeString(struct.runas);
+      }
+      if (struct.isSetProperties()) {
+        {
+          oprot.writeI32(struct.properties.size());
+          for (Map.Entry<String, String> _iter695 : struct.properties.entrySet())
+          {
+            oprot.writeString(_iter695.getKey());
+            oprot.writeString(_iter695.getValue());
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CompactionRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbname = iprot.readString();
+      struct.setDbnameIsSet(true);
+      struct.tablename = iprot.readString();
+      struct.setTablenameIsSet(true);
+      struct.type = org.apache.hadoop.hive.metastore.api.CompactionType.findByValue(iprot.readI32());
+      struct.setTypeIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.partitionname = iprot.readString();
+        struct.setPartitionnameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.runas = iprot.readString();
+        struct.setRunasIsSet(true);
+      }
+      if (incoming.get(2)) {
+        {
+          org.apache.thrift.protocol.TMap _map696 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.properties = new HashMap<String,String>(2*_map696.size);
+          String _key697;
+          String _val698;
+          for (int _i699 = 0; _i699 < _map696.size; ++_i699)
+          {
+            _key697 = iprot.readString();
+            _val698 = iprot.readString();
+            struct.properties.put(_key697, _val698);
+          }
+        }
+        struct.setPropertiesIsSet(true);
+      }
+    }
+  }
+
+}
+

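For readers skimming the generated CompactionRequest above: the three required fields travel positionally in the tuple scheme, while the optionals ride behind a BitSet. A minimal client-side sketch, not part of the generated code (the database, table, partition, and property values are hypothetical examples):

    // Hypothetical construction of a CompactionRequest by a metastore client.
    CompactionRequest rqst = new CompactionRequest("default", "sales", CompactionType.MAJOR);
    rqst.setPartitionname("ds=2018-07-13");          // optional; omitted on the wire when unset
    rqst.setRunas("hive");                           // optional
    rqst.putToProperties("example.key", "example.value"); // optional map entry; key/value are made up
    rqst.validate(); // throws TProtocolException if dbname, tablename, or type is unset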
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionResponse.java
new file mode 100644
index 0000000..f9c6955
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionResponse.java
@@ -0,0 +1,583 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CompactionResponse implements org.apache.thrift.TBase<CompactionResponse, CompactionResponse._Fields>, java.io.Serializable, Cloneable, Comparable<CompactionResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CompactionResponse");
+
+  private static final org.apache.thrift.protocol.TField ID_FIELD_DESC = new org.apache.thrift.protocol.TField("id", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("state", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField ACCEPTED_FIELD_DESC = new org.apache.thrift.protocol.TField("accepted", org.apache.thrift.protocol.TType.BOOL, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CompactionResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CompactionResponseTupleSchemeFactory());
+  }
+
+  private long id; // required
+  private String state; // required
+  private boolean accepted; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    ID((short)1, "id"),
+    STATE((short)2, "state"),
+    ACCEPTED((short)3, "accepted");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // ID
+          return ID;
+        case 2: // STATE
+          return STATE;
+        case 3: // ACCEPTED
+          return ACCEPTED;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ID_ISSET_ID = 0;
+  private static final int __ACCEPTED_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.ID, new org.apache.thrift.meta_data.FieldMetaData("id", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.STATE, new org.apache.thrift.meta_data.FieldMetaData("state", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ACCEPTED, new org.apache.thrift.meta_data.FieldMetaData("accepted", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CompactionResponse.class, metaDataMap);
+  }
+
+  public CompactionResponse() {
+  }
+
+  public CompactionResponse(
+    long id,
+    String state,
+    boolean accepted)
+  {
+    this();
+    this.id = id;
+    setIdIsSet(true);
+    this.state = state;
+    this.accepted = accepted;
+    setAcceptedIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CompactionResponse(CompactionResponse other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.id = other.id;
+    if (other.isSetState()) {
+      this.state = other.state;
+    }
+    this.accepted = other.accepted;
+  }
+
+  public CompactionResponse deepCopy() {
+    return new CompactionResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    setIdIsSet(false);
+    this.id = 0;
+    this.state = null;
+    setAcceptedIsSet(false);
+    this.accepted = false;
+  }
+
+  public long getId() {
+    return this.id;
+  }
+
+  public void setId(long id) {
+    this.id = id;
+    setIdIsSet(true);
+  }
+
+  public void unsetId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ID_ISSET_ID);
+  }
+
+  /** Returns true if field id is set (has been assigned a value) and false otherwise */
+  public boolean isSetId() {
+    return EncodingUtils.testBit(__isset_bitfield, __ID_ISSET_ID);
+  }
+
+  public void setIdIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ID_ISSET_ID, value);
+  }
+
+  public String getState() {
+    return this.state;
+  }
+
+  public void setState(String state) {
+    this.state = state;
+  }
+
+  public void unsetState() {
+    this.state = null;
+  }
+
+  /** Returns true if field state is set (has been assigned a value) and false otherwise */
+  public boolean isSetState() {
+    return this.state != null;
+  }
+
+  public void setStateIsSet(boolean value) {
+    if (!value) {
+      this.state = null;
+    }
+  }
+
+  public boolean isAccepted() {
+    return this.accepted;
+  }
+
+  public void setAccepted(boolean accepted) {
+    this.accepted = accepted;
+    setAcceptedIsSet(true);
+  }
+
+  public void unsetAccepted() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ACCEPTED_ISSET_ID);
+  }
+
+  /** Returns true if field accepted is set (has been assigned a value) and false otherwise */
+  public boolean isSetAccepted() {
+    return EncodingUtils.testBit(__isset_bitfield, __ACCEPTED_ISSET_ID);
+  }
+
+  public void setAcceptedIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ACCEPTED_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case ID:
+      if (value == null) {
+        unsetId();
+      } else {
+        setId((Long)value);
+      }
+      break;
+
+    case STATE:
+      if (value == null) {
+        unsetState();
+      } else {
+        setState((String)value);
+      }
+      break;
+
+    case ACCEPTED:
+      if (value == null) {
+        unsetAccepted();
+      } else {
+        setAccepted((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case ID:
+      return getId();
+
+    case STATE:
+      return getState();
+
+    case ACCEPTED:
+      return isAccepted();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case ID:
+      return isSetId();
+    case STATE:
+      return isSetState();
+    case ACCEPTED:
+      return isSetAccepted();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CompactionResponse)
+      return this.equals((CompactionResponse)that);
+    return false;
+  }
+
+  public boolean equals(CompactionResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_id = true;
+    boolean that_present_id = true;
+    if (this_present_id || that_present_id) {
+      if (!(this_present_id && that_present_id))
+        return false;
+      if (this.id != that.id)
+        return false;
+    }
+
+    boolean this_present_state = true && this.isSetState();
+    boolean that_present_state = true && that.isSetState();
+    if (this_present_state || that_present_state) {
+      if (!(this_present_state && that_present_state))
+        return false;
+      if (!this.state.equals(that.state))
+        return false;
+    }
+
+    boolean this_present_accepted = true;
+    boolean that_present_accepted = true;
+    if (this_present_accepted || that_present_accepted) {
+      if (!(this_present_accepted && that_present_accepted))
+        return false;
+      if (this.accepted != that.accepted)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_id = true;
+    list.add(present_id);
+    if (present_id)
+      list.add(id);
+
+    boolean present_state = true && (isSetState());
+    list.add(present_state);
+    if (present_state)
+      list.add(state);
+
+    boolean present_accepted = true;
+    list.add(present_accepted);
+    if (present_accepted)
+      list.add(accepted);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CompactionResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetId()).compareTo(other.isSetId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.id, other.id);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetState()).compareTo(other.isSetState());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetState()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.state, other.state);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAccepted()).compareTo(other.isSetAccepted());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAccepted()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.accepted, other.accepted);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CompactionResponse(");
+    boolean first = true;
+
+    sb.append("id:");
+    sb.append(this.id);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("state:");
+    if (this.state == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.state);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("accepted:");
+    sb.append(this.accepted);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'id' is unset! Struct:" + toString());
+    }
+
+    if (!isSetState()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'state' is unset! Struct:" + toString());
+    }
+
+    if (!isSetAccepted()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'accepted' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization doesn't call the default constructor, so the isset bitfield must be reset here before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CompactionResponseStandardSchemeFactory implements SchemeFactory {
+    public CompactionResponseStandardScheme getScheme() {
+      return new CompactionResponseStandardScheme();
+    }
+  }
+
+  private static class CompactionResponseStandardScheme extends StandardScheme<CompactionResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CompactionResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.id = iprot.readI64();
+              struct.setIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // STATE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.state = iprot.readString();
+              struct.setStateIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // ACCEPTED
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.accepted = iprot.readBool();
+              struct.setAcceptedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CompactionResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(ID_FIELD_DESC);
+      oprot.writeI64(struct.id);
+      oprot.writeFieldEnd();
+      if (struct.state != null) {
+        oprot.writeFieldBegin(STATE_FIELD_DESC);
+        oprot.writeString(struct.state);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(ACCEPTED_FIELD_DESC);
+      oprot.writeBool(struct.accepted);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CompactionResponseTupleSchemeFactory implements SchemeFactory {
+    public CompactionResponseTupleScheme getScheme() {
+      return new CompactionResponseTupleScheme();
+    }
+  }
+
+  private static class CompactionResponseTupleScheme extends TupleScheme<CompactionResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CompactionResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.id);
+      oprot.writeString(struct.state);
+      oprot.writeBool(struct.accepted);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CompactionResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.id = iprot.readI64();
+      struct.setIdIsSet(true);
+      struct.state = iprot.readString();
+      struct.setStateIsSet(true);
+      struct.accepted = iprot.readBool();
+      struct.setAcceptedIsSet(true);
+    }
+  }
+
+}
+

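Because every CompactionResponse field is required, the tuple scheme above writes no BitSet at all. A round-trip sketch under that assumption, using Thrift's in-memory transport (TMemoryBuffer and TCompactProtocol are standard libthrift classes; the field values are made up):

    // Serialize and deserialize a CompactionResponse over an in-memory buffer.
    org.apache.thrift.transport.TMemoryBuffer buf = new org.apache.thrift.transport.TMemoryBuffer(64);
    org.apache.thrift.protocol.TCompactProtocol proto = new org.apache.thrift.protocol.TCompactProtocol(buf);
    CompactionResponse out = new CompactionResponse(42L, "initiated", true);
    out.write(proto);   // the standard scheme runs validate() first, rejecting unset required fields
    CompactionResponse in = new CompactionResponse();
    in.read(proto);
    assert out.equals(in);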
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionType.java
new file mode 100644
index 0000000..7450b27
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CompactionType.java
@@ -0,0 +1,45 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum CompactionType implements org.apache.thrift.TEnum {
+  MINOR(1),
+  MAJOR(2);
+
+  private final int value;
+
+  private CompactionType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static CompactionType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return MINOR;
+      case 2:
+        return MAJOR;
+      default:
+        return null;
+    }
+  }
+}

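Note that CompactionType.findByValue returns null for an unrecognized wire value rather than throwing, so code decoding an i32 from a peer on a newer schema should guard against null. A small sketch (the value 3 is a hypothetical future ordinal, not defined in this IDL):

    // Defensive decoding of a CompactionType ordinal read off the wire.
    int wireValue = 3; // hypothetical: not a value known to this version
    CompactionType type = CompactionType.findByValue(wireValue);
    if (type == null) {
      throw new IllegalArgumentException("Unknown CompactionType value: " + wireValue);
    }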
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
new file mode 100644
index 0000000..c4837a1
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ConfigValSecurityException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ConfigValSecurityException extends TException implements org.apache.thrift.TBase<ConfigValSecurityException, ConfigValSecurityException._Fields>, java.io.Serializable, Cloneable, Comparable<ConfigValSecurityException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ConfigValSecurityException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ConfigValSecurityExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ConfigValSecurityExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ConfigValSecurityException.class, metaDataMap);
+  }
+
+  public ConfigValSecurityException() {
+  }
+
+  public ConfigValSecurityException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ConfigValSecurityException(ConfigValSecurityException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public ConfigValSecurityException deepCopy() {
+    return new ConfigValSecurityException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ConfigValSecurityException)
+      return this.equals((ConfigValSecurityException)that);
+    return false;
+  }
+
+  public boolean equals(ConfigValSecurityException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ConfigValSecurityException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ConfigValSecurityException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ConfigValSecurityExceptionStandardSchemeFactory implements SchemeFactory {
+    public ConfigValSecurityExceptionStandardScheme getScheme() {
+      return new ConfigValSecurityExceptionStandardScheme();
+    }
+  }
+
+  private static class ConfigValSecurityExceptionStandardScheme extends StandardScheme<ConfigValSecurityException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ConfigValSecurityException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ConfigValSecurityException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ConfigValSecurityExceptionTupleSchemeFactory implements SchemeFactory {
+    public ConfigValSecurityExceptionTupleScheme getScheme() {
+      return new ConfigValSecurityExceptionTupleScheme();
+    }
+  }
+
+  private static class ConfigValSecurityExceptionTupleScheme extends TupleScheme<ConfigValSecurityException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ConfigValSecurityException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ConfigValSecurityException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
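For context, a minimal sketch of how a caller might handle the ConfigValSecurityException type generated above. The MetastoreClient interface and getConfigValue method below are hypothetical stand-ins for whatever client API is in use; only the exception class itself comes from this diff.

import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;

public class ConfigLookupSketch {
  // Hypothetical client abstraction; any real client exposing a config
  // lookup that can throw ConfigValSecurityException fits this shape.
  interface MetastoreClient {
    String getConfigValue(String key, String defaultValue) throws ConfigValSecurityException;
  }

  static String safeLookup(MetastoreClient client, String key) {
    try {
      return client.getConfigValue(key, "");
    } catch (ConfigValSecurityException e) {
      // The server declined to expose this value; e.getMessage() carries
      // the reason via the single 'message' field defined above.
      return "";
    }
  }
}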

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateCatalogRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateCatalogRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateCatalogRequest.java
new file mode 100644
index 0000000..c260b3d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreateCatalogRequest.java
@@ -0,0 +1,400 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CreateCatalogRequest implements org.apache.thrift.TBase<CreateCatalogRequest, CreateCatalogRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CreateCatalogRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CreateCatalogRequest");
+
+  private static final org.apache.thrift.protocol.TField CATALOG_FIELD_DESC = new org.apache.thrift.protocol.TField("catalog", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CreateCatalogRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CreateCatalogRequestTupleSchemeFactory());
+  }
+
+  private Catalog catalog; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CATALOG((short)1, "catalog");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CATALOG
+          return CATALOG;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CATALOG, new org.apache.thrift.meta_data.FieldMetaData("catalog", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Catalog.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CreateCatalogRequest.class, metaDataMap);
+  }
+
+  public CreateCatalogRequest() {
+  }
+
+  public CreateCatalogRequest(
+    Catalog catalog)
+  {
+    this();
+    this.catalog = catalog;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CreateCatalogRequest(CreateCatalogRequest other) {
+    if (other.isSetCatalog()) {
+      this.catalog = new Catalog(other.catalog);
+    }
+  }
+
+  public CreateCatalogRequest deepCopy() {
+    return new CreateCatalogRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catalog = null;
+  }
+
+  public Catalog getCatalog() {
+    return this.catalog;
+  }
+
+  public void setCatalog(Catalog catalog) {
+    this.catalog = catalog;
+  }
+
+  public void unsetCatalog() {
+    this.catalog = null;
+  }
+
+  /** Returns true if field catalog is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatalog() {
+    return this.catalog != null;
+  }
+
+  public void setCatalogIsSet(boolean value) {
+    if (!value) {
+      this.catalog = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CATALOG:
+      if (value == null) {
+        unsetCatalog();
+      } else {
+        setCatalog((Catalog)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CATALOG:
+      return getCatalog();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CATALOG:
+      return isSetCatalog();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CreateCatalogRequest)
+      return this.equals((CreateCatalogRequest)that);
+    return false;
+  }
+
+  public boolean equals(CreateCatalogRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catalog = true && this.isSetCatalog();
+    boolean that_present_catalog = true && that.isSetCatalog();
+    if (this_present_catalog || that_present_catalog) {
+      if (!(this_present_catalog && that_present_catalog))
+        return false;
+      if (!this.catalog.equals(that.catalog))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catalog = true && (isSetCatalog());
+    list.add(present_catalog);
+    if (present_catalog)
+      list.add(catalog);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CreateCatalogRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatalog()).compareTo(other.isSetCatalog());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatalog()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalog, other.catalog);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CreateCatalogRequest(");
+    boolean first = true;
+
+    sb.append("catalog:");
+    if (this.catalog == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catalog);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (catalog != null) {
+      catalog.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CreateCatalogRequestStandardSchemeFactory implements SchemeFactory {
+    public CreateCatalogRequestStandardScheme getScheme() {
+      return new CreateCatalogRequestStandardScheme();
+    }
+  }
+
+  private static class CreateCatalogRequestStandardScheme extends StandardScheme<CreateCatalogRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CreateCatalogRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CATALOG
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.catalog = new Catalog();
+              struct.catalog.read(iprot);
+              struct.setCatalogIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CreateCatalogRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catalog != null) {
+        oprot.writeFieldBegin(CATALOG_FIELD_DESC);
+        struct.catalog.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CreateCatalogRequestTupleSchemeFactory implements SchemeFactory {
+    public CreateCatalogRequestTupleScheme getScheme() {
+      return new CreateCatalogRequestTupleScheme();
+    }
+  }
+
+  private static class CreateCatalogRequestTupleScheme extends TupleScheme<CreateCatalogRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CreateCatalogRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatalog()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetCatalog()) {
+        struct.catalog.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CreateCatalogRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.catalog = new Catalog();
+        struct.catalog.read(iprot);
+        struct.setCatalogIsSet(true);
+      }
+    }
+  }
+
+}
+
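For orientation, a small sketch of populating this request struct on the client side. Catalog's setName/setLocationUri setters are assumed from its own generated Java code, which is not part of this diff (its field layout appears in the Ruby FIELDS map later in this mail).

import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.CreateCatalogRequest;

public class CreateCatalogSketch {
  public static CreateCatalogRequest buildRequest() {
    Catalog cat = new Catalog();
    cat.setName("test_cat");                    // field 1 in the Thrift definition
    cat.setLocationUri("file:///tmp/test_cat"); // field 3; description is optional

    // The convenience constructor above assigns the single 'catalog' field.
    CreateCatalogRequest req = new CreateCatalogRequest(cat);
    assert req.isSetCatalog();
    return req;
  }
}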


[76/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
index 0000000,0348ff2..86b469c
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/hive_metastore_types.rb
@@@ -1,0 -1,5322 +1,5419 @@@
+ #
+ # Autogenerated by Thrift Compiler (0.9.3)
+ #
+ # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ #
+ 
+ require 'thrift'
+ require 'fb303_types'
+ 
+ 
+ module HiveObjectType
+   GLOBAL = 1
+   DATABASE = 2
+   TABLE = 3
+   PARTITION = 4
+   COLUMN = 5
+   VALUE_MAP = {1 => "GLOBAL", 2 => "DATABASE", 3 => "TABLE", 4 => "PARTITION", 5 => "COLUMN"}
+   VALID_VALUES = Set.new([GLOBAL, DATABASE, TABLE, PARTITION, COLUMN]).freeze
+ end
+ 
+ module PrincipalType
+   USER = 1
+   ROLE = 2
+   GROUP = 3
+   VALUE_MAP = {1 => "USER", 2 => "ROLE", 3 => "GROUP"}
+   VALID_VALUES = Set.new([USER, ROLE, GROUP]).freeze
+ end
+ 
+ module PartitionEventType
+   LOAD_DONE = 1
+   VALUE_MAP = {1 => "LOAD_DONE"}
+   VALID_VALUES = Set.new([LOAD_DONE]).freeze
+ end
+ 
+ module TxnState
+   COMMITTED = 1
+   ABORTED = 2
+   OPEN = 3
+   VALUE_MAP = {1 => "COMMITTED", 2 => "ABORTED", 3 => "OPEN"}
+   VALID_VALUES = Set.new([COMMITTED, ABORTED, OPEN]).freeze
+ end
+ 
+ module LockLevel
+   DB = 1
+   TABLE = 2
+   PARTITION = 3
+   VALUE_MAP = {1 => "DB", 2 => "TABLE", 3 => "PARTITION"}
+   VALID_VALUES = Set.new([DB, TABLE, PARTITION]).freeze
+ end
+ 
+ module LockState
+   ACQUIRED = 1
+   WAITING = 2
+   ABORT = 3
+   NOT_ACQUIRED = 4
+   VALUE_MAP = {1 => "ACQUIRED", 2 => "WAITING", 3 => "ABORT", 4 => "NOT_ACQUIRED"}
+   VALID_VALUES = Set.new([ACQUIRED, WAITING, ABORT, NOT_ACQUIRED]).freeze
+ end
+ 
+ module LockType
+   SHARED_READ = 1
+   SHARED_WRITE = 2
+   EXCLUSIVE = 3
+   VALUE_MAP = {1 => "SHARED_READ", 2 => "SHARED_WRITE", 3 => "EXCLUSIVE"}
+   VALID_VALUES = Set.new([SHARED_READ, SHARED_WRITE, EXCLUSIVE]).freeze
+ end
+ 
+ module CompactionType
+   MINOR = 1
+   MAJOR = 2
+   VALUE_MAP = {1 => "MINOR", 2 => "MAJOR"}
+   VALID_VALUES = Set.new([MINOR, MAJOR]).freeze
+ end
+ 
+ module GrantRevokeType
+   GRANT = 1
+   REVOKE = 2
+   VALUE_MAP = {1 => "GRANT", 2 => "REVOKE"}
+   VALID_VALUES = Set.new([GRANT, REVOKE]).freeze
+ end
+ 
+ module DataOperationType
+   SELECT = 1
+   INSERT = 2
+   UPDATE = 3
+   DELETE = 4
+   UNSET = 5
+   NO_TXN = 6
+   VALUE_MAP = {1 => "SELECT", 2 => "INSERT", 3 => "UPDATE", 4 => "DELETE", 5 => "UNSET", 6 => "NO_TXN"}
+   VALID_VALUES = Set.new([SELECT, INSERT, UPDATE, DELETE, UNSET, NO_TXN]).freeze
+ end
+ 
+ module EventRequestType
+   INSERT = 1
+   UPDATE = 2
+   DELETE = 3
+   VALUE_MAP = {1 => "INSERT", 2 => "UPDATE", 3 => "DELETE"}
+   VALID_VALUES = Set.new([INSERT, UPDATE, DELETE]).freeze
+ end
+ 
+ module SerdeType
+   HIVE = 1
+   SCHEMA_REGISTRY = 2
+   VALUE_MAP = {1 => "HIVE", 2 => "SCHEMA_REGISTRY"}
+   VALID_VALUES = Set.new([HIVE, SCHEMA_REGISTRY]).freeze
+ end
+ 
+ module SchemaType
+   HIVE = 1
+   AVRO = 2
+   VALUE_MAP = {1 => "HIVE", 2 => "AVRO"}
+   VALID_VALUES = Set.new([HIVE, AVRO]).freeze
+ end
+ 
+ module SchemaCompatibility
+   NONE = 1
+   BACKWARD = 2
+   FORWARD = 3
+   BOTH = 4
+   VALUE_MAP = {1 => "NONE", 2 => "BACKWARD", 3 => "FORWARD", 4 => "BOTH"}
+   VALID_VALUES = Set.new([NONE, BACKWARD, FORWARD, BOTH]).freeze
+ end
+ 
+ module SchemaValidation
+   LATEST = 1
+   ALL = 2
+   VALUE_MAP = {1 => "LATEST", 2 => "ALL"}
+   VALID_VALUES = Set.new([LATEST, ALL]).freeze
+ end
+ 
+ module SchemaVersionState
+   INITIATED = 1
+   START_REVIEW = 2
+   CHANGES_REQUIRED = 3
+   REVIEWED = 4
+   ENABLED = 5
+   DISABLED = 6
+   ARCHIVED = 7
+   DELETED = 8
+   VALUE_MAP = {1 => "INITIATED", 2 => "START_REVIEW", 3 => "CHANGES_REQUIRED", 4 => "REVIEWED", 5 => "ENABLED", 6 => "DISABLED", 7 => "ARCHIVED", 8 => "DELETED"}
+   VALID_VALUES = Set.new([INITIATED, START_REVIEW, CHANGES_REQUIRED, REVIEWED, ENABLED, DISABLED, ARCHIVED, DELETED]).freeze
+ end
+ 
+ module FunctionType
+   JAVA = 1
+   VALUE_MAP = {1 => "JAVA"}
+   VALID_VALUES = Set.new([JAVA]).freeze
+ end
+ 
+ module ResourceType
+   JAR = 1
+   FILE = 2
+   ARCHIVE = 3
+   VALUE_MAP = {1 => "JAR", 2 => "FILE", 3 => "ARCHIVE"}
+   VALID_VALUES = Set.new([JAR, FILE, ARCHIVE]).freeze
+ end
+ 
+ module FileMetadataExprType
+   ORC_SARG = 1
+   VALUE_MAP = {1 => "ORC_SARG"}
+   VALID_VALUES = Set.new([ORC_SARG]).freeze
+ end
+ 
+ module ClientCapability
+   TEST_CAPABILITY = 1
+   INSERT_ONLY_TABLES = 2
+   VALUE_MAP = {1 => "TEST_CAPABILITY", 2 => "INSERT_ONLY_TABLES"}
+   VALID_VALUES = Set.new([TEST_CAPABILITY, INSERT_ONLY_TABLES]).freeze
+ end
+ 
+ module WMResourcePlanStatus
+   ACTIVE = 1
+   ENABLED = 2
+   DISABLED = 3
+   VALUE_MAP = {1 => "ACTIVE", 2 => "ENABLED", 3 => "DISABLED"}
+   VALID_VALUES = Set.new([ACTIVE, ENABLED, DISABLED]).freeze
+ end
+ 
+ module WMPoolSchedulingPolicy
+   FAIR = 1
+   FIFO = 2
+   VALUE_MAP = {1 => "FAIR", 2 => "FIFO"}
+   VALID_VALUES = Set.new([FAIR, FIFO]).freeze
+ end
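The Ruby modules above render each Thrift enum as plain integer constants with a VALUE_MAP and a VALID_VALUES set. The Java bindings generate real enum types for the same definitions; below is a sketch of the usual wire round-trip, assuming the standard Thrift 0.9.3 Java enum shape (getValue() and a static findByValue(int)).

import org.apache.hadoop.hive.metastore.api.PrincipalType;

public class EnumRoundTripSketch {
  public static void main(String[] args) {
    int wire = PrincipalType.ROLE.getValue();          // 2, matching VALUE_MAP above
    PrincipalType back = PrincipalType.findByValue(wire);
    System.out.println(back);                          // ROLE
    // Unknown wire values come back as null rather than throwing, which is
    // the Java analogue of the nil-tolerant validate checks in the Ruby structs.
    System.out.println(PrincipalType.findByValue(99)); // null
  }
}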
+ 
+ class Version
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   VERSION = 1
+   COMMENTS = 2
+ 
+   FIELDS = {
+     VERSION => {:type => ::Thrift::Types::STRING, :name => 'version'},
+     COMMENTS => {:type => ::Thrift::Types::STRING, :name => 'comments'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class FieldSchema
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NAME = 1
+   TYPE = 2
+   COMMENT = 3
+ 
+   FIELDS = {
+     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
+     TYPE => {:type => ::Thrift::Types::STRING, :name => 'type'},
+     COMMENT => {:type => ::Thrift::Types::STRING, :name => 'comment'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class SQLPrimaryKey
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   TABLE_DB = 1
+   TABLE_NAME = 2
+   COLUMN_NAME = 3
+   KEY_SEQ = 4
+   PK_NAME = 5
+   ENABLE_CSTR = 6
+   VALIDATE_CSTR = 7
+   RELY_CSTR = 8
+   CATNAME = 9
+ 
+   FIELDS = {
+     TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'},
+     TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'},
+     COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'},
+     KEY_SEQ => {:type => ::Thrift::Types::I32, :name => 'key_seq'},
+     PK_NAME => {:type => ::Thrift::Types::STRING, :name => 'pk_name'},
+     ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'},
+     VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'},
+     RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class SQLForeignKey
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PKTABLE_DB = 1
+   PKTABLE_NAME = 2
+   PKCOLUMN_NAME = 3
+   FKTABLE_DB = 4
+   FKTABLE_NAME = 5
+   FKCOLUMN_NAME = 6
+   KEY_SEQ = 7
+   UPDATE_RULE = 8
+   DELETE_RULE = 9
+   FK_NAME = 10
+   PK_NAME = 11
+   ENABLE_CSTR = 12
+   VALIDATE_CSTR = 13
+   RELY_CSTR = 14
+   CATNAME = 15
+ 
+   FIELDS = {
+     PKTABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'pktable_db'},
+     PKTABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'pktable_name'},
+     PKCOLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'pkcolumn_name'},
+     FKTABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'fktable_db'},
+     FKTABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'fktable_name'},
+     FKCOLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'fkcolumn_name'},
+     KEY_SEQ => {:type => ::Thrift::Types::I32, :name => 'key_seq'},
+     UPDATE_RULE => {:type => ::Thrift::Types::I32, :name => 'update_rule'},
+     DELETE_RULE => {:type => ::Thrift::Types::I32, :name => 'delete_rule'},
+     FK_NAME => {:type => ::Thrift::Types::STRING, :name => 'fk_name'},
+     PK_NAME => {:type => ::Thrift::Types::STRING, :name => 'pk_name'},
+     ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'},
+     VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'},
+     RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class SQLUniqueConstraint
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATNAME = 1
+   TABLE_DB = 2
+   TABLE_NAME = 3
+   COLUMN_NAME = 4
+   KEY_SEQ = 5
+   UK_NAME = 6
+   ENABLE_CSTR = 7
+   VALIDATE_CSTR = 8
+   RELY_CSTR = 9
+ 
+   FIELDS = {
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'},
+     TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'},
+     TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'},
+     COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'},
+     KEY_SEQ => {:type => ::Thrift::Types::I32, :name => 'key_seq'},
+     UK_NAME => {:type => ::Thrift::Types::STRING, :name => 'uk_name'},
+     ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'},
+     VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'},
+     RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class SQLNotNullConstraint
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATNAME = 1
+   TABLE_DB = 2
+   TABLE_NAME = 3
+   COLUMN_NAME = 4
+   NN_NAME = 5
+   ENABLE_CSTR = 6
+   VALIDATE_CSTR = 7
+   RELY_CSTR = 8
+ 
+   FIELDS = {
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'},
+     TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'},
+     TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'},
+     COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'},
+     NN_NAME => {:type => ::Thrift::Types::STRING, :name => 'nn_name'},
+     ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'},
+     VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'},
+     RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class SQLDefaultConstraint
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATNAME = 1
+   TABLE_DB = 2
+   TABLE_NAME = 3
+   COLUMN_NAME = 4
+   DEFAULT_VALUE = 5
+   DC_NAME = 6
+   ENABLE_CSTR = 7
+   VALIDATE_CSTR = 8
+   RELY_CSTR = 9
+ 
+   FIELDS = {
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'},
+     TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'},
+     TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'},
+     COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'},
+     DEFAULT_VALUE => {:type => ::Thrift::Types::STRING, :name => 'default_value'},
+     DC_NAME => {:type => ::Thrift::Types::STRING, :name => 'dc_name'},
+     ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'},
+     VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'},
+     RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class SQLCheckConstraint
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATNAME = 1
+   TABLE_DB = 2
+   TABLE_NAME = 3
+   COLUMN_NAME = 4
+   CHECK_EXPRESSION = 5
+   DC_NAME = 6
+   ENABLE_CSTR = 7
+   VALIDATE_CSTR = 8
+   RELY_CSTR = 9
+ 
+   FIELDS = {
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'},
+     TABLE_DB => {:type => ::Thrift::Types::STRING, :name => 'table_db'},
+     TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'},
+     COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'},
+     CHECK_EXPRESSION => {:type => ::Thrift::Types::STRING, :name => 'check_expression'},
+     DC_NAME => {:type => ::Thrift::Types::STRING, :name => 'dc_name'},
+     ENABLE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'enable_cstr'},
+     VALIDATE_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'validate_cstr'},
+     RELY_CSTR => {:type => ::Thrift::Types::BOOL, :name => 'rely_cstr'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class Type
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NAME = 1
+   TYPE1 = 2
+   TYPE2 = 3
+   FIELDS = 4
+ 
+   FIELDS = {
+     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
+     TYPE1 => {:type => ::Thrift::Types::STRING, :name => 'type1', :optional => true},
+     TYPE2 => {:type => ::Thrift::Types::STRING, :name => 'type2', :optional => true},
+     FIELDS => {:type => ::Thrift::Types::LIST, :name => 'fields', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class HiveObjectRef
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   OBJECTTYPE = 1
+   DBNAME = 2
+   OBJECTNAME = 3
+   PARTVALUES = 4
+   COLUMNNAME = 5
+   CATNAME = 6
+ 
+   FIELDS = {
+     OBJECTTYPE => {:type => ::Thrift::Types::I32, :name => 'objectType', :enum_class => ::HiveObjectType},
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     OBJECTNAME => {:type => ::Thrift::Types::STRING, :name => 'objectName'},
+     PARTVALUES => {:type => ::Thrift::Types::LIST, :name => 'partValues', :element => {:type => ::Thrift::Types::STRING}},
+     COLUMNNAME => {:type => ::Thrift::Types::STRING, :name => 'columnName'},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @objectType.nil? || ::HiveObjectType::VALID_VALUES.include?(@objectType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field objectType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PrivilegeGrantInfo
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PRIVILEGE = 1
+   CREATETIME = 2
+   GRANTOR = 3
+   GRANTORTYPE = 4
+   GRANTOPTION = 5
+ 
+   FIELDS = {
+     PRIVILEGE => {:type => ::Thrift::Types::STRING, :name => 'privilege'},
+     CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime'},
+     GRANTOR => {:type => ::Thrift::Types::STRING, :name => 'grantor'},
+     GRANTORTYPE => {:type => ::Thrift::Types::I32, :name => 'grantorType', :enum_class => ::PrincipalType},
+     GRANTOPTION => {:type => ::Thrift::Types::BOOL, :name => 'grantOption'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @grantorType.nil? || ::PrincipalType::VALID_VALUES.include?(@grantorType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field grantorType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class HiveObjectPrivilege
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   HIVEOBJECT = 1
+   PRINCIPALNAME = 2
+   PRINCIPALTYPE = 3
+   GRANTINFO = 4
+   AUTHORIZER = 5
+ 
+   FIELDS = {
+     HIVEOBJECT => {:type => ::Thrift::Types::STRUCT, :name => 'hiveObject', :class => ::HiveObjectRef},
+     PRINCIPALNAME => {:type => ::Thrift::Types::STRING, :name => 'principalName'},
+     PRINCIPALTYPE => {:type => ::Thrift::Types::I32, :name => 'principalType', :enum_class => ::PrincipalType},
+     GRANTINFO => {:type => ::Thrift::Types::STRUCT, :name => 'grantInfo', :class => ::PrivilegeGrantInfo},
+     AUTHORIZER => {:type => ::Thrift::Types::STRING, :name => 'authorizer'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @principalType.nil? || ::PrincipalType::VALID_VALUES.include?(@principalType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field principalType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PrivilegeBag
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PRIVILEGES = 1
+ 
+   FIELDS = {
+     PRIVILEGES => {:type => ::Thrift::Types::LIST, :name => 'privileges', :element => {:type => ::Thrift::Types::STRUCT, :class => ::HiveObjectPrivilege}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PrincipalPrivilegeSet
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   USERPRIVILEGES = 1
+   GROUPPRIVILEGES = 2
+   ROLEPRIVILEGES = 3
+ 
+   FIELDS = {
+     USERPRIVILEGES => {:type => ::Thrift::Types::MAP, :name => 'userPrivileges', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::PrivilegeGrantInfo}}},
+     GROUPPRIVILEGES => {:type => ::Thrift::Types::MAP, :name => 'groupPrivileges', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::PrivilegeGrantInfo}}},
+     ROLEPRIVILEGES => {:type => ::Thrift::Types::MAP, :name => 'rolePrivileges', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::PrivilegeGrantInfo}}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GrantRevokePrivilegeRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   REQUESTTYPE = 1
+   PRIVILEGES = 2
+   REVOKEGRANTOPTION = 3
+ 
+   FIELDS = {
+     REQUESTTYPE => {:type => ::Thrift::Types::I32, :name => 'requestType', :enum_class => ::GrantRevokeType},
+     PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrivilegeBag},
+     REVOKEGRANTOPTION => {:type => ::Thrift::Types::BOOL, :name => 'revokeGrantOption', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @requestType.nil? || ::GrantRevokeType::VALID_VALUES.include?(@requestType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field requestType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GrantRevokePrivilegeResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   SUCCESS = 1
+ 
+   FIELDS = {
+     SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class Role
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   ROLENAME = 1
+   CREATETIME = 2
+   OWNERNAME = 3
+ 
+   FIELDS = {
+     ROLENAME => {:type => ::Thrift::Types::STRING, :name => 'roleName'},
+     CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime'},
+     OWNERNAME => {:type => ::Thrift::Types::STRING, :name => 'ownerName'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class RolePrincipalGrant
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   ROLENAME = 1
+   PRINCIPALNAME = 2
+   PRINCIPALTYPE = 3
+   GRANTOPTION = 4
+   GRANTTIME = 5
+   GRANTORNAME = 6
+   GRANTORPRINCIPALTYPE = 7
+ 
+   FIELDS = {
+     ROLENAME => {:type => ::Thrift::Types::STRING, :name => 'roleName'},
+     PRINCIPALNAME => {:type => ::Thrift::Types::STRING, :name => 'principalName'},
+     PRINCIPALTYPE => {:type => ::Thrift::Types::I32, :name => 'principalType', :enum_class => ::PrincipalType},
+     GRANTOPTION => {:type => ::Thrift::Types::BOOL, :name => 'grantOption'},
+     GRANTTIME => {:type => ::Thrift::Types::I32, :name => 'grantTime'},
+     GRANTORNAME => {:type => ::Thrift::Types::STRING, :name => 'grantorName'},
+     GRANTORPRINCIPALTYPE => {:type => ::Thrift::Types::I32, :name => 'grantorPrincipalType', :enum_class => ::PrincipalType}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @principalType.nil? || ::PrincipalType::VALID_VALUES.include?(@principalType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field principalType!')
+     end
+     unless @grantorPrincipalType.nil? || ::PrincipalType::VALID_VALUES.include?(@grantorPrincipalType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field grantorPrincipalType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GetRoleGrantsForPrincipalRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PRINCIPAL_NAME = 1
+   PRINCIPAL_TYPE = 2
+ 
+   FIELDS = {
+     PRINCIPAL_NAME => {:type => ::Thrift::Types::STRING, :name => 'principal_name'},
+     PRINCIPAL_TYPE => {:type => ::Thrift::Types::I32, :name => 'principal_type', :enum_class => ::PrincipalType}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field principal_name is unset!') unless @principal_name
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field principal_type is unset!') unless @principal_type
+     unless @principal_type.nil? || ::PrincipalType::VALID_VALUES.include?(@principal_type)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field principal_type!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GetRoleGrantsForPrincipalResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PRINCIPALGRANTS = 1
+ 
+   FIELDS = {
+     PRINCIPALGRANTS => {:type => ::Thrift::Types::LIST, :name => 'principalGrants', :element => {:type => ::Thrift::Types::STRUCT, :class => ::RolePrincipalGrant}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field principalGrants is unset!') unless @principalGrants
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GetPrincipalsInRoleRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   ROLENAME = 1
+ 
+   FIELDS = {
+     ROLENAME => {:type => ::Thrift::Types::STRING, :name => 'roleName'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field roleName is unset!') unless @roleName
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GetPrincipalsInRoleResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PRINCIPALGRANTS = 1
+ 
+   FIELDS = {
+     PRINCIPALGRANTS => {:type => ::Thrift::Types::LIST, :name => 'principalGrants', :element => {:type => ::Thrift::Types::STRUCT, :class => ::RolePrincipalGrant}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field principalGrants is unset!') unless @principalGrants
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GrantRevokeRoleRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   REQUESTTYPE = 1
+   ROLENAME = 2
+   PRINCIPALNAME = 3
+   PRINCIPALTYPE = 4
+   GRANTOR = 5
+   GRANTORTYPE = 6
+   GRANTOPTION = 7
+ 
+   FIELDS = {
+     REQUESTTYPE => {:type => ::Thrift::Types::I32, :name => 'requestType', :enum_class => ::GrantRevokeType},
+     ROLENAME => {:type => ::Thrift::Types::STRING, :name => 'roleName'},
+     PRINCIPALNAME => {:type => ::Thrift::Types::STRING, :name => 'principalName'},
+     PRINCIPALTYPE => {:type => ::Thrift::Types::I32, :name => 'principalType', :enum_class => ::PrincipalType},
+     GRANTOR => {:type => ::Thrift::Types::STRING, :name => 'grantor', :optional => true},
+     GRANTORTYPE => {:type => ::Thrift::Types::I32, :name => 'grantorType', :optional => true, :enum_class => ::PrincipalType},
+     GRANTOPTION => {:type => ::Thrift::Types::BOOL, :name => 'grantOption', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @requestType.nil? || ::GrantRevokeType::VALID_VALUES.include?(@requestType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field requestType!')
+     end
+     unless @principalType.nil? || ::PrincipalType::VALID_VALUES.include?(@principalType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field principalType!')
+     end
+     unless @grantorType.nil? || ::PrincipalType::VALID_VALUES.include?(@grantorType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field grantorType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GrantRevokeRoleResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   SUCCESS = 1
+ 
+   FIELDS = {
+     SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class Catalog
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NAME = 1
+   DESCRIPTION = 2
+   LOCATIONURI = 3
+ 
+   FIELDS = {
+     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
+     DESCRIPTION => {:type => ::Thrift::Types::STRING, :name => 'description', :optional => true},
+     LOCATIONURI => {:type => ::Thrift::Types::STRING, :name => 'locationUri'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class CreateCatalogRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATALOG = 1
+ 
+   FIELDS = {
+     CATALOG => {:type => ::Thrift::Types::STRUCT, :name => 'catalog', :class => ::Catalog}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AlterCatalogRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NAME = 1
+   NEWCAT = 2
+ 
+   FIELDS = {
+     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
+     NEWCAT => {:type => ::Thrift::Types::STRUCT, :name => 'newCat', :class => ::Catalog}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GetCatalogRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NAME = 1
+ 
+   FIELDS = {
+     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GetCatalogResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATALOG = 1
+ 
+   FIELDS = {
+     CATALOG => {:type => ::Thrift::Types::STRUCT, :name => 'catalog', :class => ::Catalog}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class GetCatalogsResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NAMES = 1
+ 
+   FIELDS = {
+     NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class DropCatalogRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NAME = 1
+ 
+   FIELDS = {
+     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class Database
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NAME = 1
+   DESCRIPTION = 2
+   LOCATIONURI = 3
+   PARAMETERS = 4
+   PRIVILEGES = 5
+   OWNERNAME = 6
+   OWNERTYPE = 7
+   CATALOGNAME = 8
+ 
+   FIELDS = {
+     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
+     DESCRIPTION => {:type => ::Thrift::Types::STRING, :name => 'description'},
+     LOCATIONURI => {:type => ::Thrift::Types::STRING, :name => 'locationUri'},
+     PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+     PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
+     OWNERNAME => {:type => ::Thrift::Types::STRING, :name => 'ownerName', :optional => true},
+     OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :optional => true, :enum_class => ::PrincipalType},
+     CATALOGNAME => {:type => ::Thrift::Types::STRING, :name => 'catalogName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @ownerType.nil? || ::PrincipalType::VALID_VALUES.include?(@ownerType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field ownerType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class SerDeInfo
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NAME = 1
+   SERIALIZATIONLIB = 2
+   PARAMETERS = 3
+   DESCRIPTION = 4
+   SERIALIZERCLASS = 5
+   DESERIALIZERCLASS = 6
+   SERDETYPE = 7
+ 
+   FIELDS = {
+     NAME => {:type => ::Thrift::Types::STRING, :name => 'name'},
+     SERIALIZATIONLIB => {:type => ::Thrift::Types::STRING, :name => 'serializationLib'},
+     PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+     DESCRIPTION => {:type => ::Thrift::Types::STRING, :name => 'description', :optional => true},
+     SERIALIZERCLASS => {:type => ::Thrift::Types::STRING, :name => 'serializerClass', :optional => true},
+     DESERIALIZERCLASS => {:type => ::Thrift::Types::STRING, :name => 'deserializerClass', :optional => true},
+     SERDETYPE => {:type => ::Thrift::Types::I32, :name => 'serdeType', :optional => true, :enum_class => ::SerdeType}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @serdeType.nil? || ::SerdeType::VALID_VALUES.include?(@serdeType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field serdeType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class Order
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   COL = 1
+   ORDER = 2
+ 
+   FIELDS = {
+     COL => {:type => ::Thrift::Types::STRING, :name => 'col'},
+     ORDER => {:type => ::Thrift::Types::I32, :name => 'order'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class SkewedInfo
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   SKEWEDCOLNAMES = 1
+   SKEWEDCOLVALUES = 2
+   SKEWEDCOLVALUELOCATIONMAPS = 3
+ 
+   FIELDS = {
+     SKEWEDCOLNAMES => {:type => ::Thrift::Types::LIST, :name => 'skewedColNames', :element => {:type => ::Thrift::Types::STRING}},
+     SKEWEDCOLVALUES => {:type => ::Thrift::Types::LIST, :name => 'skewedColValues', :element => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRING}}},
+     SKEWEDCOLVALUELOCATIONMAPS => {:type => ::Thrift::Types::MAP, :name => 'skewedColValueLocationMaps', :key => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRING}}, :value => {:type => ::Thrift::Types::STRING}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class StorageDescriptor
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   COLS = 1
+   LOCATION = 2
+   INPUTFORMAT = 3
+   OUTPUTFORMAT = 4
+   COMPRESSED = 5
+   NUMBUCKETS = 6
+   SERDEINFO = 7
+   BUCKETCOLS = 8
+   SORTCOLS = 9
+   PARAMETERS = 10
+   SKEWEDINFO = 11
+   STOREDASSUBDIRECTORIES = 12
+ 
+   FIELDS = {
+     COLS => {:type => ::Thrift::Types::LIST, :name => 'cols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}},
+     LOCATION => {:type => ::Thrift::Types::STRING, :name => 'location'},
+     INPUTFORMAT => {:type => ::Thrift::Types::STRING, :name => 'inputFormat'},
+     OUTPUTFORMAT => {:type => ::Thrift::Types::STRING, :name => 'outputFormat'},
+     COMPRESSED => {:type => ::Thrift::Types::BOOL, :name => 'compressed'},
+     NUMBUCKETS => {:type => ::Thrift::Types::I32, :name => 'numBuckets'},
+     SERDEINFO => {:type => ::Thrift::Types::STRUCT, :name => 'serdeInfo', :class => ::SerDeInfo},
+     BUCKETCOLS => {:type => ::Thrift::Types::LIST, :name => 'bucketCols', :element => {:type => ::Thrift::Types::STRING}},
+     SORTCOLS => {:type => ::Thrift::Types::LIST, :name => 'sortCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Order}},
+     PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+     SKEWEDINFO => {:type => ::Thrift::Types::STRUCT, :name => 'skewedInfo', :class => ::SkewedInfo, :optional => true},
+     STOREDASSUBDIRECTORIES => {:type => ::Thrift::Types::BOOL, :name => 'storedAsSubDirectories', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class Table
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   TABLENAME = 1
+   DBNAME = 2
+   OWNER = 3
+   CREATETIME = 4
+   LASTACCESSTIME = 5
+   RETENTION = 6
+   SD = 7
+   PARTITIONKEYS = 8
+   PARAMETERS = 9
+   VIEWORIGINALTEXT = 10
+   VIEWEXPANDEDTEXT = 11
+   TABLETYPE = 12
+   PRIVILEGES = 13
+   TEMPORARY = 14
+   REWRITEENABLED = 15
+   CREATIONMETADATA = 16
+   CATNAME = 17
+   OWNERTYPE = 18
++  WRITEID = 19
++  ISSTATSCOMPLIANT = 20
+ 
+   FIELDS = {
+     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     OWNER => {:type => ::Thrift::Types::STRING, :name => 'owner'},
+     CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime'},
+     LASTACCESSTIME => {:type => ::Thrift::Types::I32, :name => 'lastAccessTime'},
+     RETENTION => {:type => ::Thrift::Types::I32, :name => 'retention'},
+     SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor},
+     PARTITIONKEYS => {:type => ::Thrift::Types::LIST, :name => 'partitionKeys', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}},
+     PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+     VIEWORIGINALTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewOriginalText'},
+     VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'},
+     TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'},
+     PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
+     TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true},
+     REWRITEENABLED => {:type => ::Thrift::Types::BOOL, :name => 'rewriteEnabled', :optional => true},
+     CREATIONMETADATA => {:type => ::Thrift::Types::STRUCT, :name => 'creationMetadata', :class => ::CreationMetadata, :optional => true},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
 -    OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default =>     1, :optional => true, :enum_class => ::PrincipalType}
++    OWNERTYPE => {:type => ::Thrift::Types::I32, :name => 'ownerType', :default =>     1, :optional => true, :enum_class => ::PrincipalType},
++    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
++    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @ownerType.nil? || ::PrincipalType::VALID_VALUES.include?(@ownerType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field ownerType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
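The '++' lines in the Table class above are the substantive change in this merge: Table gains optional writeId (defaulting to -1, i.e. unset) and isStatsCompliant fields, and the same pair is added to Partition and PartitionSpec below, evidently in support of transaction-aware column statistics. A minimal Java sketch, assuming the matching generated Java Table exposes the standard Thrift accessors for the new fields:

import org.apache.hadoop.hive.metastore.api.Table;

public class TxnStatsFieldsSketch {
  public static void tagWrite(Table t, long writeId) {
    t.setWriteId(writeId);        // was -1 (unset) by default, per FIELDS above
    t.setIsStatsCompliant(true);  // marks the stats as valid for this writeId
    assert t.isSetWriteId();
  }
}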
+ 
+ class Partition
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   VALUES = 1
+   DBNAME = 2
+   TABLENAME = 3
+   CREATETIME = 4
+   LASTACCESSTIME = 5
+   SD = 6
+   PARAMETERS = 7
+   PRIVILEGES = 8
+   CATNAME = 9
++  WRITEID = 10
++  ISSTATSCOMPLIANT = 11
+ 
+   FIELDS = {
+     VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}},
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+     CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime'},
+     LASTACCESSTIME => {:type => ::Thrift::Types::I32, :name => 'lastAccessTime'},
+     SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor},
+     PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+     PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true},
 -    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
++    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
++    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
++    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
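[Editorial sketch, not part of the generated file: with this regenerated module on the Ruby load path, the new optional writeId and isStatsCompliant fields on Partition are set like any other struct field. The require path, database, table, and partition values below are all invented for illustration.]

  require 'hive_metastore_types'   # assumed path to this generated file

  part = Partition.new(
    :dbName => 'sales', :tableName => 'orders',
    :values => ['2018-07-13'],
    :sd => StorageDescriptor.new,
    :writeId => 17,              # field added by this change; defaults to -1
    :isStatsCompliant => true)   # optional flag added by this change
  part.validate    # Partition declares no required fields, so this passes
  part.writeId     # => 17, via the generated accessors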
+ class PartitionWithoutSD
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   VALUES = 1
+   CREATETIME = 2
+   LASTACCESSTIME = 3
+   RELATIVEPATH = 4
+   PARAMETERS = 5
+   PRIVILEGES = 6
+ 
+   FIELDS = {
+     VALUES => {:type => ::Thrift::Types::LIST, :name => 'values', :element => {:type => ::Thrift::Types::STRING}},
+     CREATETIME => {:type => ::Thrift::Types::I32, :name => 'createTime'},
+     LASTACCESSTIME => {:type => ::Thrift::Types::I32, :name => 'lastAccessTime'},
+     RELATIVEPATH => {:type => ::Thrift::Types::STRING, :name => 'relativePath'},
+     PARAMETERS => {:type => ::Thrift::Types::MAP, :name => 'parameters', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}},
+     PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionSpecWithSharedSD
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PARTITIONS = 1
+   SD = 2
+ 
+   FIELDS = {
+     PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::PartitionWithoutSD}},
+     SD => {:type => ::Thrift::Types::STRUCT, :name => 'sd', :class => ::StorageDescriptor}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionListComposingSpec
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PARTITIONS = 1
+ 
+   FIELDS = {
+     PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionSpec
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DBNAME = 1
+   TABLENAME = 2
+   ROOTPATH = 3
+   SHAREDSDPARTITIONSPEC = 4
+   PARTITIONLIST = 5
+   CATNAME = 6
++  WRITEID = 7
++  ISSTATSCOMPLIANT = 8
+ 
+   FIELDS = {
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+     ROOTPATH => {:type => ::Thrift::Types::STRING, :name => 'rootPath'},
+     SHAREDSDPARTITIONSPEC => {:type => ::Thrift::Types::STRUCT, :name => 'sharedSDPartitionSpec', :class => ::PartitionSpecWithSharedSD, :optional => true},
+     PARTITIONLIST => {:type => ::Thrift::Types::STRUCT, :name => 'partitionList', :class => ::PartitionListComposingSpec, :optional => true},
 -    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
++    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
++    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
++    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class BooleanColumnStatsData
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NUMTRUES = 1
+   NUMFALSES = 2
+   NUMNULLS = 3
+   BITVECTORS = 4
+ 
+   FIELDS = {
+     NUMTRUES => {:type => ::Thrift::Types::I64, :name => 'numTrues'},
+     NUMFALSES => {:type => ::Thrift::Types::I64, :name => 'numFalses'},
+     NUMNULLS => {:type => ::Thrift::Types::I64, :name => 'numNulls'},
+     BITVECTORS => {:type => ::Thrift::Types::STRING, :name => 'bitVectors', :binary => true, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numTrues is unset!') unless @numTrues
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numFalses is unset!') unless @numFalses
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numNulls is unset!') unless @numNulls
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class DoubleColumnStatsData
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   LOWVALUE = 1
+   HIGHVALUE = 2
+   NUMNULLS = 3
+   NUMDVS = 4
+   BITVECTORS = 5
+ 
+   FIELDS = {
+     LOWVALUE => {:type => ::Thrift::Types::DOUBLE, :name => 'lowValue', :optional => true},
+     HIGHVALUE => {:type => ::Thrift::Types::DOUBLE, :name => 'highValue', :optional => true},
+     NUMNULLS => {:type => ::Thrift::Types::I64, :name => 'numNulls'},
+     NUMDVS => {:type => ::Thrift::Types::I64, :name => 'numDVs'},
+     BITVECTORS => {:type => ::Thrift::Types::STRING, :name => 'bitVectors', :binary => true, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numNulls is unset!') unless @numNulls
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numDVs is unset!') unless @numDVs
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class LongColumnStatsData
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   LOWVALUE = 1
+   HIGHVALUE = 2
+   NUMNULLS = 3
+   NUMDVS = 4
+   BITVECTORS = 5
+ 
+   FIELDS = {
+     LOWVALUE => {:type => ::Thrift::Types::I64, :name => 'lowValue', :optional => true},
+     HIGHVALUE => {:type => ::Thrift::Types::I64, :name => 'highValue', :optional => true},
+     NUMNULLS => {:type => ::Thrift::Types::I64, :name => 'numNulls'},
+     NUMDVS => {:type => ::Thrift::Types::I64, :name => 'numDVs'},
+     BITVECTORS => {:type => ::Thrift::Types::STRING, :name => 'bitVectors', :binary => true, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numNulls is unset!') unless @numNulls
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numDVs is unset!') unless @numDVs
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class StringColumnStatsData
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   MAXCOLLEN = 1
+   AVGCOLLEN = 2
+   NUMNULLS = 3
+   NUMDVS = 4
+   BITVECTORS = 5
+ 
+   FIELDS = {
+     MAXCOLLEN => {:type => ::Thrift::Types::I64, :name => 'maxColLen'},
+     AVGCOLLEN => {:type => ::Thrift::Types::DOUBLE, :name => 'avgColLen'},
+     NUMNULLS => {:type => ::Thrift::Types::I64, :name => 'numNulls'},
+     NUMDVS => {:type => ::Thrift::Types::I64, :name => 'numDVs'},
+     BITVECTORS => {:type => ::Thrift::Types::STRING, :name => 'bitVectors', :binary => true, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field maxColLen is unset!') unless @maxColLen
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field avgColLen is unset!') unless @avgColLen
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numNulls is unset!') unless @numNulls
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numDVs is unset!') unless @numDVs
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class BinaryColumnStatsData
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   MAXCOLLEN = 1
+   AVGCOLLEN = 2
+   NUMNULLS = 3
+   BITVECTORS = 4
+ 
+   FIELDS = {
+     MAXCOLLEN => {:type => ::Thrift::Types::I64, :name => 'maxColLen'},
+     AVGCOLLEN => {:type => ::Thrift::Types::DOUBLE, :name => 'avgColLen'},
+     NUMNULLS => {:type => ::Thrift::Types::I64, :name => 'numNulls'},
+     BITVECTORS => {:type => ::Thrift::Types::STRING, :name => 'bitVectors', :binary => true, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field maxColLen is unset!') unless @maxColLen
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field avgColLen is unset!') unless @avgColLen
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numNulls is unset!') unless @numNulls
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class Decimal
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   SCALE = 3
+   UNSCALED = 1
+ 
+   FIELDS = {
+     SCALE => {:type => ::Thrift::Types::I16, :name => 'scale'},
+     UNSCALED => {:type => ::Thrift::Types::STRING, :name => 'unscaled', :binary => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field scale is unset!') unless @scale
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field unscaled is unset!') unless @unscaled
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
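[Editorial sketch, assuming the generated types are loaded: unscaled carries the serialized bytes of the unscaled integer (big-endian two's complement on the Java side), and scale shifts the decimal point. The field ids above (UNSCALED = 1, SCALE = 3) affect only the wire encoding, not usage. Values below are invented.]

  d = Decimal.new(:scale => 2, :unscaled => "\x30\x39")  # 0x3039 = 12345 -> 123.45
  d.validate    # passes: both required fields are set
  Decimal.new(:scale => 2).validate
  # raises Thrift::ProtocolException: 'Required field unscaled is unset!'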
+ class DecimalColumnStatsData
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   LOWVALUE = 1
+   HIGHVALUE = 2
+   NUMNULLS = 3
+   NUMDVS = 4
+   BITVECTORS = 5
+ 
+   FIELDS = {
+     LOWVALUE => {:type => ::Thrift::Types::STRUCT, :name => 'lowValue', :class => ::Decimal, :optional => true},
+     HIGHVALUE => {:type => ::Thrift::Types::STRUCT, :name => 'highValue', :class => ::Decimal, :optional => true},
+     NUMNULLS => {:type => ::Thrift::Types::I64, :name => 'numNulls'},
+     NUMDVS => {:type => ::Thrift::Types::I64, :name => 'numDVs'},
+     BITVECTORS => {:type => ::Thrift::Types::STRING, :name => 'bitVectors', :binary => true, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numNulls is unset!') unless @numNulls
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numDVs is unset!') unless @numDVs
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class Date
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DAYSSINCEEPOCH = 1
+ 
+   FIELDS = {
+     DAYSSINCEEPOCH => {:type => ::Thrift::Types::I64, :name => 'daysSinceEpoch'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field daysSinceEpoch is unset!') unless @daysSinceEpoch
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class DateColumnStatsData
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   LOWVALUE = 1
+   HIGHVALUE = 2
+   NUMNULLS = 3
+   NUMDVS = 4
+   BITVECTORS = 5
+ 
+   FIELDS = {
+     LOWVALUE => {:type => ::Thrift::Types::STRUCT, :name => 'lowValue', :class => ::Date, :optional => true},
+     HIGHVALUE => {:type => ::Thrift::Types::STRUCT, :name => 'highValue', :class => ::Date, :optional => true},
+     NUMNULLS => {:type => ::Thrift::Types::I64, :name => 'numNulls'},
+     NUMDVS => {:type => ::Thrift::Types::I64, :name => 'numDVs'},
+     BITVECTORS => {:type => ::Thrift::Types::STRING, :name => 'bitVectors', :binary => true, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numNulls is unset!') unless @numNulls
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field numDVs is unset!') unless @numDVs
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class ColumnStatisticsData < ::Thrift::Union
+   include ::Thrift::Struct_Union
+   class << self
+     def booleanStats(val)
+       ColumnStatisticsData.new(:booleanStats, val)
+     end
+ 
+     def longStats(val)
+       ColumnStatisticsData.new(:longStats, val)
+     end
+ 
+     def doubleStats(val)
+       ColumnStatisticsData.new(:doubleStats, val)
+     end
+ 
+     def stringStats(val)
+       ColumnStatisticsData.new(:stringStats, val)
+     end
+ 
+     def binaryStats(val)
+       ColumnStatisticsData.new(:binaryStats, val)
+     end
+ 
+     def decimalStats(val)
+       ColumnStatisticsData.new(:decimalStats, val)
+     end
+ 
+     def dateStats(val)
+       ColumnStatisticsData.new(:dateStats, val)
+     end
+   end
+ 
+   BOOLEANSTATS = 1
+   LONGSTATS = 2
+   DOUBLESTATS = 3
+   STRINGSTATS = 4
+   BINARYSTATS = 5
+   DECIMALSTATS = 6
+   DATESTATS = 7
+ 
+   FIELDS = {
+     BOOLEANSTATS => {:type => ::Thrift::Types::STRUCT, :name => 'booleanStats', :class => ::BooleanColumnStatsData},
+     LONGSTATS => {:type => ::Thrift::Types::STRUCT, :name => 'longStats', :class => ::LongColumnStatsData},
+     DOUBLESTATS => {:type => ::Thrift::Types::STRUCT, :name => 'doubleStats', :class => ::DoubleColumnStatsData},
+     STRINGSTATS => {:type => ::Thrift::Types::STRUCT, :name => 'stringStats', :class => ::StringColumnStatsData},
+     BINARYSTATS => {:type => ::Thrift::Types::STRUCT, :name => 'binaryStats', :class => ::BinaryColumnStatsData},
+     DECIMALSTATS => {:type => ::Thrift::Types::STRUCT, :name => 'decimalStats', :class => ::DecimalColumnStatsData},
+     DATESTATS => {:type => ::Thrift::Types::STRUCT, :name => 'dateStats', :class => ::DateColumnStatsData}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise(StandardError, 'Union fields are not set.') if get_set_field.nil? || get_value.nil?
+   end
+ 
+   ::Thrift::Union.generate_accessors self
+ end
+ 
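[Editorial sketch: the union is the one generated type here with a distinct construction pattern. The class-level helpers above (booleanStats, longStats, ...) each build an instance with exactly one arm set, and validate enforces that. Assumes the generated types are loaded; the numbers are invented.]

  stats = LongColumnStatsData.new(:lowValue => 1, :highValue => 100,
                                  :numNulls => 0, :numDVs => 42)
  data = ColumnStatisticsData.longStats(stats)   # selects the longStats arm
  data.get_set_field   # => :longStats
  data.validate        # passes: exactly one arm is set
  ColumnStatisticsData.new.validate
  # raises StandardError, 'Union fields are not set.'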
+ class ColumnStatisticsObj
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   COLNAME = 1
+   COLTYPE = 2
+   STATSDATA = 3
+ 
+   FIELDS = {
+     COLNAME => {:type => ::Thrift::Types::STRING, :name => 'colName'},
+     COLTYPE => {:type => ::Thrift::Types::STRING, :name => 'colType'},
+     STATSDATA => {:type => ::Thrift::Types::STRUCT, :name => 'statsData', :class => ::ColumnStatisticsData}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colName is unset!') unless @colName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colType is unset!') unless @colType
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsData is unset!') unless @statsData
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class ColumnStatisticsDesc
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   ISTBLLEVEL = 1
+   DBNAME = 2
+   TABLENAME = 3
+   PARTNAME = 4
+   LASTANALYZED = 5
+   CATNAME = 6
+ 
+   FIELDS = {
+     ISTBLLEVEL => {:type => ::Thrift::Types::BOOL, :name => 'isTblLevel'},
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'},
+     PARTNAME => {:type => ::Thrift::Types::STRING, :name => 'partName', :optional => true},
+     LASTANALYZED => {:type => ::Thrift::Types::I64, :name => 'lastAnalyzed', :optional => true},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field isTblLevel is unset!') if @isTblLevel.nil?
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableName is unset!') unless @tableName
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class ColumnStatistics
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   STATSDESC = 1
+   STATSOBJ = 2
++  TXNID = 3
++  VALIDWRITEIDLIST = 4
++  ISSTATSCOMPLIANT = 5
+ 
+   FIELDS = {
+     STATSDESC => {:type => ::Thrift::Types::STRUCT, :name => 'statsDesc', :class => ::ColumnStatisticsDesc},
 -    STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}
++    STATSOBJ => {:type => ::Thrift::Types::LIST, :name => 'statsObj', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
++    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
++    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true},
++    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsDesc is unset!') unless @statsDesc
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field statsObj is unset!') unless @statsObj
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AggrStats
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   COLSTATS = 1
+   PARTSFOUND = 2
++  ISSTATSCOMPLIANT = 3
+ 
+   FIELDS = {
+     COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
 -    PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'}
++    PARTSFOUND => {:type => ::Thrift::Types::I64, :name => 'partsFound'},
++    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partsFound is unset!') unless @partsFound
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class SetPartitionsStatsRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   COLSTATS = 1
+   NEEDMERGE = 2
++  TXNID = 3
++  WRITEID = 4
++  VALIDWRITEIDLIST = 5
+ 
+   FIELDS = {
+     COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatistics}},
 -    NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true}
++    NEEDMERGE => {:type => ::Thrift::Types::BOOL, :name => 'needMerge', :optional => true},
++    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
++    WRITEID => {:type => ::Thrift::Types::I64, :name => 'writeId', :default => -1, :optional => true},
++    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
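[Editorial sketch putting the statistics types together end to end, including the txnId and writeId fields this change adds. Assumes the generated types are loaded; all names and numbers are invented.]

  desc = ColumnStatisticsDesc.new(:isTblLevel => true,
                                  :dbName => 'sales', :tableName => 'orders')
  bool = BooleanColumnStatsData.new(:numTrues => 10, :numFalses => 2, :numNulls => 0)
  obj  = ColumnStatisticsObj.new(:colName => 'active', :colType => 'boolean',
                                 :statsData => ColumnStatisticsData.booleanStats(bool))
  cs   = ColumnStatistics.new(:statsDesc => desc, :statsObj => [obj], :txnId => 5)
  req  = SetPartitionsStatsRequest.new(:colStats => [cs], :writeId => 9)
  req.validate   # passes: colStats is the only required field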
+ class Schema
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   FIELDSCHEMAS = 1
+   PROPERTIES = 2
+ 
+   FIELDS = {
+     FIELDSCHEMAS => {:type => ::Thrift::Types::LIST, :name => 'fieldSchemas', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}},
+     PROPERTIES => {:type => ::Thrift::Types::MAP, :name => 'properties', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class EnvironmentContext
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PROPERTIES = 1
+ 
+   FIELDS = {
+     PROPERTIES => {:type => ::Thrift::Types::MAP, :name => 'properties', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::STRING}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PrimaryKeysRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DB_NAME = 1
+   TBL_NAME = 2
+   CATNAME = 3
+ 
+   FIELDS = {
+     DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
+     TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PrimaryKeysResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PRIMARYKEYS = 1
+ 
+   FIELDS = {
+     PRIMARYKEYS => {:type => ::Thrift::Types::LIST, :name => 'primaryKeys', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLPrimaryKey}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field primaryKeys is unset!') unless @primaryKeys
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class ForeignKeysRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PARENT_DB_NAME = 1
+   PARENT_TBL_NAME = 2
+   FOREIGN_DB_NAME = 3
+   FOREIGN_TBL_NAME = 4
+   CATNAME = 5
+ 
+   FIELDS = {
+     PARENT_DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'parent_db_name'},
+     PARENT_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'parent_tbl_name'},
+     FOREIGN_DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'foreign_db_name'},
+     FOREIGN_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'foreign_tbl_name'},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class ForeignKeysResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   FOREIGNKEYS = 1
+ 
+   FIELDS = {
+     FOREIGNKEYS => {:type => ::Thrift::Types::LIST, :name => 'foreignKeys', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLForeignKey}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field foreignKeys is unset!') unless @foreignKeys
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class UniqueConstraintsRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATNAME = 1
+   DB_NAME = 2
+   TBL_NAME = 3
+ 
+   FIELDS = {
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'},
+     DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
+     TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class UniqueConstraintsResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   UNIQUECONSTRAINTS = 1
+ 
+   FIELDS = {
+     UNIQUECONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'uniqueConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLUniqueConstraint}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field uniqueConstraints is unset!') unless @uniqueConstraints
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class NotNullConstraintsRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATNAME = 1
+   DB_NAME = 2
+   TBL_NAME = 3
+ 
+   FIELDS = {
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'},
+     DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
+     TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class NotNullConstraintsResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NOTNULLCONSTRAINTS = 1
+ 
+   FIELDS = {
+     NOTNULLCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'notNullConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLNotNullConstraint}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field notNullConstraints is unset!') unless @notNullConstraints
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class DefaultConstraintsRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATNAME = 1
+   DB_NAME = 2
+   TBL_NAME = 3
+ 
+   FIELDS = {
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'},
+     DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
+     TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class DefaultConstraintsResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DEFAULTCONSTRAINTS = 1
+ 
+   FIELDS = {
+     DEFAULTCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'defaultConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLDefaultConstraint}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field defaultConstraints is unset!') unless @defaultConstraints
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class CheckConstraintsRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CATNAME = 1
+   DB_NAME = 2
+   TBL_NAME = 3
+ 
+   FIELDS = {
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName'},
+     DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'},
+     TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field catName is unset!') unless @catName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field db_name is unset!') unless @db_name
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tbl_name is unset!') unless @tbl_name
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class CheckConstraintsResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CHECKCONSTRAINTS = 1
+ 
+   FIELDS = {
+     CHECKCONSTRAINTS => {:type => ::Thrift::Types::LIST, :name => 'checkConstraints', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLCheckConstraint}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field checkConstraints is unset!') unless @checkConstraints
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class DropConstraintRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DBNAME = 1
+   TABLENAME = 2
+   CONSTRAINTNAME = 3
+   CATNAME = 4
+ 
+   FIELDS = {
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbname'},
+     TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tablename'},
+     CONSTRAINTNAME => {:type => ::Thrift::Types::STRING, :name => 'constraintname'},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbname is unset!') unless @dbname
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tablename is unset!') unless @tablename
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field constraintname is unset!') unless @constraintname
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AddPrimaryKeyRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PRIMARYKEYCOLS = 1
+ 
+   FIELDS = {
+     PRIMARYKEYCOLS => {:type => ::Thrift::Types::LIST, :name => 'primaryKeyCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLPrimaryKey}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field primaryKeyCols is unset!') unless @primaryKeyCols
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AddForeignKeyRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   FOREIGNKEYCOLS = 1
+ 
+   FIELDS = {
+     FOREIGNKEYCOLS => {:type => ::Thrift::Types::LIST, :name => 'foreignKeyCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLForeignKey}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field foreignKeyCols is unset!') unless @foreignKeyCols
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AddUniqueConstraintRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   UNIQUECONSTRAINTCOLS = 1
+ 
+   FIELDS = {
+     UNIQUECONSTRAINTCOLS => {:type => ::Thrift::Types::LIST, :name => 'uniqueConstraintCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLUniqueConstraint}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field uniqueConstraintCols is unset!') unless @uniqueConstraintCols
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AddNotNullConstraintRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   NOTNULLCONSTRAINTCOLS = 1
+ 
+   FIELDS = {
+     NOTNULLCONSTRAINTCOLS => {:type => ::Thrift::Types::LIST, :name => 'notNullConstraintCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLNotNullConstraint}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field notNullConstraintCols is unset!') unless @notNullConstraintCols
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AddDefaultConstraintRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DEFAULTCONSTRAINTCOLS = 1
+ 
+   FIELDS = {
+     DEFAULTCONSTRAINTCOLS => {:type => ::Thrift::Types::LIST, :name => 'defaultConstraintCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLDefaultConstraint}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field defaultConstraintCols is unset!') unless @defaultConstraintCols
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AddCheckConstraintRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   CHECKCONSTRAINTCOLS = 1
+ 
+   FIELDS = {
+     CHECKCONSTRAINTCOLS => {:type => ::Thrift::Types::LIST, :name => 'checkConstraintCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLCheckConstraint}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field checkConstraintCols is unset!') unless @checkConstraintCols
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionsByExprResult
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PARTITIONS = 1
+   HASUNKNOWNPARTITIONS = 2
+ 
+   FIELDS = {
+     PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
+     HASUNKNOWNPARTITIONS => {:type => ::Thrift::Types::BOOL, :name => 'hasUnknownPartitions'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partitions is unset!') unless @partitions
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field hasUnknownPartitions is unset!') if @hasUnknownPartitions.nil?
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionsByExprRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DBNAME = 1
+   TBLNAME = 2
+   EXPR = 3
+   DEFAULTPARTITIONNAME = 4
+   MAXPARTS = 5
+   CATNAME = 6
+ 
+   FIELDS = {
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
+     EXPR => {:type => ::Thrift::Types::STRING, :name => 'expr', :binary => true},
+     DEFAULTPARTITIONNAME => {:type => ::Thrift::Types::STRING, :name => 'defaultPartitionName', :optional => true},
+     MAXPARTS => {:type => ::Thrift::Types::I16, :name => 'maxParts', :default => -1, :optional => true},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field expr is unset!') unless @expr
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class TableStatsResult
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   TABLESTATS = 1
++  ISSTATSCOMPLIANT = 2
+ 
+   FIELDS = {
 -    TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}
++    TABLESTATS => {:type => ::Thrift::Types::LIST, :name => 'tableStats', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}},
++    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tableStats is unset!') unless @tableStats
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionsStatsResult
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PARTSTATS = 1
++  ISSTATSCOMPLIANT = 2
+ 
+   FIELDS = {
 -    PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}}
++    PARTSTATS => {:type => ::Thrift::Types::MAP, :name => 'partStats', :key => {:type => ::Thrift::Types::STRING}, :value => {:type => ::Thrift::Types::LIST, :element => {:type => ::Thrift::Types::STRUCT, :class => ::ColumnStatisticsObj}}},
++    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partStats is unset!') unless @partStats
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class TableStatsRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DBNAME = 1
+   TBLNAME = 2
+   COLNAMES = 3
+   CATNAME = 4
++  TXNID = 5
++  VALIDWRITEIDLIST = 6
+ 
+   FIELDS = {
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
+     COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
 -    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
++    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
++    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
++    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colNames is unset!') unless @colNames
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionsStatsRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DBNAME = 1
+   TBLNAME = 2
+   COLNAMES = 3
+   PARTNAMES = 4
+   CATNAME = 5
++  TXNID = 6
++  VALIDWRITEIDLIST = 7
+ 
+   FIELDS = {
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
+     COLNAMES => {:type => ::Thrift::Types::LIST, :name => 'colNames', :element => {:type => ::Thrift::Types::STRING}},
+     PARTNAMES => {:type => ::Thrift::Types::LIST, :name => 'partNames', :element => {:type => ::Thrift::Types::STRING}},
 -    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
++    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
++    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
++    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colNames is unset!') unless @colNames
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partNames is unset!') unless @partNames
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AddPartitionsResult
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PARTITIONS = 1
++  ISSTATSCOMPLIANT = 2
+ 
+   FIELDS = {
 -    PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true}
++    PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true},
++    ISSTATSCOMPLIANT => {:type => ::Thrift::Types::BOOL, :name => 'isStatsCompliant', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class AddPartitionsRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DBNAME = 1
+   TBLNAME = 2
+   PARTS = 3
+   IFNOTEXISTS = 4
+   NEEDRESULT = 5
+   CATNAME = 6
++  TXNID = 7
++  VALIDWRITEIDLIST = 8
+ 
+   FIELDS = {
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
+     PARTS => {:type => ::Thrift::Types::LIST, :name => 'parts', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}},
+     IFNOTEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifNotExists'},
+     NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true},
 -    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
++    CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true},
++    TXNID => {:type => ::Thrift::Types::I64, :name => 'txnId', :default => -1, :optional => true},
++    VALIDWRITEIDLIST => {:type => ::Thrift::Types::STRING, :name => 'validWriteIdList', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field parts is unset!') unless @parts
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field ifNotExists is unset!') if @ifNotExists.nil?
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class DropPartitionsResult
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PARTITIONS = 1
+ 
+   FIELDS = {
+     PARTITIONS => {:type => ::Thrift::Types::LIST, :name => 'partitions', :element => {:type => ::Thrift::Types::STRUCT, :class => ::Partition}, :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class DropPartitionsExpr
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   EXPR = 1
+   PARTARCHIVELEVEL = 2
+ 
+   FIELDS = {
+     EXPR => {:type => ::Thrift::Types::STRING, :name => 'expr', :binary => true},
+     PARTARCHIVELEVEL => {:type => ::Thrift::Types::I32, :name => 'partArchiveLevel', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field expr is unset!') unless @expr
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class RequestPartsSpec < ::Thrift::Union
+   include ::Thrift::Struct_Union
+   class << self
+     def names(val)
+       RequestPartsSpec.new(:names, val)
+     end
+ 
+     def exprs(val)
+       RequestPartsSpec.new(:exprs, val)
+     end
+   end
+ 
+   NAMES = 1
+   EXPRS = 2
+ 
+   FIELDS = {
+     NAMES => {:type => ::Thrift::Types::LIST, :name => 'names', :element => {:type => ::Thrift::Types::STRING}},
+     EXPRS => {:type => ::Thrift::Types::LIST, :name => 'exprs', :element => {:type => ::Thrift::Types::STRUCT, :class => ::DropPartitionsExpr}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise(StandardError, 'Union fields are not set.') if get_set_field.nil? || get_value.nil?
+   end
+ 
+   ::Thrift::Union.generate_accessors self
+ end
+ 
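[Editorial sketch: RequestPartsSpec follows the same union pattern, letting a DropPartitionsRequest name its targets either by partition name or by serialized expression. Assumes the generated types are loaded; names below are invented.]

  spec = RequestPartsSpec.names(['ds=2018-07-12', 'ds=2018-07-13'])
  req  = DropPartitionsRequest.new(
    :dbName => 'sales', :tblName => 'orders', :parts => spec,
    :deleteData => true, :needResult => false)
  req.validate   # passes: dbName, tblName, and parts are all set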
+ class DropPartitionsRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DBNAME = 1
+   TBLNAME = 2
+   PARTS = 3
+   DELETEDATA = 4
+   IFEXISTS = 5
+   IGNOREPROTECTION = 6
+   ENVIRONMENTCONTEXT = 7
+   NEEDRESULT = 8
+   CATNAME = 9
+ 
+   FIELDS = {
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
+     PARTS => {:type => ::Thrift::Types::STRUCT, :name => 'parts', :class => ::RequestPartsSpec},
+     DELETEDATA => {:type => ::Thrift::Types::BOOL, :name => 'deleteData', :optional => true},
+     IFEXISTS => {:type => ::Thrift::Types::BOOL, :name => 'ifExists', :default => true, :optional => true},
+     IGNOREPROTECTION => {:type => ::Thrift::Types::BOOL, :name => 'ignoreProtection', :optional => true},
+     ENVIRONMENTCONTEXT => {:type => ::Thrift::Types::STRUCT, :name => 'environmentContext', :class => ::EnvironmentContext, :optional => true},
+     NEEDRESULT => {:type => ::Thrift::Types::BOOL, :name => 'needResult', :default => true, :optional => true},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field parts is unset!') unless @parts
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionValuesRequest
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   DBNAME = 1
+   TBLNAME = 2
+   PARTITIONKEYS = 3
+   APPLYDISTINCT = 4
+   FILTER = 5
+   PARTITIONORDER = 6
+   ASCENDING = 7
+   MAXPARTS = 8
+   CATNAME = 9
+ 
+   FIELDS = {
+     DBNAME => {:type => ::Thrift::Types::STRING, :name => 'dbName'},
+     TBLNAME => {:type => ::Thrift::Types::STRING, :name => 'tblName'},
+     PARTITIONKEYS => {:type => ::Thrift::Types::LIST, :name => 'partitionKeys', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}},
+     APPLYDISTINCT => {:type => ::Thrift::Types::BOOL, :name => 'applyDistinct', :default => true, :optional => true},
+     FILTER => {:type => ::Thrift::Types::STRING, :name => 'filter', :optional => true},
+     PARTITIONORDER => {:type => ::Thrift::Types::LIST, :name => 'partitionOrder', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}, :optional => true},
+     ASCENDING => {:type => ::Thrift::Types::BOOL, :name => 'ascending', :default => true, :optional => true},
+     MAXPARTS => {:type => ::Thrift::Types::I64, :name => 'maxParts', :default => -1, :optional => true},
+     CATNAME => {:type => ::Thrift::Types::STRING, :name => 'catName', :optional => true}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field dbName is unset!') unless @dbName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field tblName is unset!') unless @tblName
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partitionKeys is unset!') unless @partitionKeys
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionValuesRow
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   ROW = 1
+ 
+   FIELDS = {
+     ROW => {:type => ::Thrift::Types::LIST, :name => 'row', :element => {:type => ::Thrift::Types::STRING}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field row is unset!') unless @row
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class PartitionValuesResponse
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   PARTITIONVALUES = 1
+ 
+   FIELDS = {
+     PARTITIONVALUES => {:type => ::Thrift::Types::LIST, :name => 'partitionValues', :element => {:type => ::Thrift::Types::STRUCT, :class => ::PartitionValuesRow}}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field partitionValues is unset!') unless @partitionValues
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class ResourceUri
+   include ::Thrift::Struct, ::Thrift::Struct_Union
+   RESOURCETYPE = 1
+   URI = 2
+ 
+   FIELDS = {
+     RESOURCETYPE => {:type => ::Thrift::Types::I32, :name => 'resourceType', :enum_class => ::ResourceType},
+     URI => {:type => ::Thrift::Types::STRING, :name => 'uri'}
+   }
+ 
+   def struct_fields; FIELDS; end
+ 
+   def validate
+     unless @resourceType.nil? || ::ResourceType::VALID_VALUES.include?(@resourceType)
+       raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field resourceType!')
+     end
+   end
+ 
+   ::Thrift::Struct.generate_accessors self
+ end
+ 
+ class Function
+   include ::Thrift::Str

<TRUNCATED>

[22/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java
new file mode 100644
index 0000000..8f42847
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeRequest.java
@@ -0,0 +1,620 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GrantRevokePrivilegeRequest implements org.apache.thrift.TBase<GrantRevokePrivilegeRequest, GrantRevokePrivilegeRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GrantRevokePrivilegeRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokePrivilegeRequest");
+
+  private static final org.apache.thrift.protocol.TField REQUEST_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("requestType", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+  private static final org.apache.thrift.protocol.TField REVOKE_GRANT_OPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("revokeGrantOption", org.apache.thrift.protocol.TType.BOOL, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GrantRevokePrivilegeRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GrantRevokePrivilegeRequestTupleSchemeFactory());
+  }
+
+  private GrantRevokeType requestType; // required
+  private PrivilegeBag privileges; // required
+  private boolean revokeGrantOption; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    /**
+     * 
+     * @see GrantRevokeType
+     */
+    REQUEST_TYPE((short)1, "requestType"),
+    PRIVILEGES((short)2, "privileges"),
+    REVOKE_GRANT_OPTION((short)3, "revokeGrantOption");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // REQUEST_TYPE
+          return REQUEST_TYPE;
+        case 2: // PRIVILEGES
+          return PRIVILEGES;
+        case 3: // REVOKE_GRANT_OPTION
+          return REVOKE_GRANT_OPTION;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __REVOKEGRANTOPTION_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.REVOKE_GRANT_OPTION};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.REQUEST_TYPE, new org.apache.thrift.meta_data.FieldMetaData("requestType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, GrantRevokeType.class)));
+    tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrivilegeBag.class)));
+    tmpMap.put(_Fields.REVOKE_GRANT_OPTION, new org.apache.thrift.meta_data.FieldMetaData("revokeGrantOption", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GrantRevokePrivilegeRequest.class, metaDataMap);
+  }
+
+  public GrantRevokePrivilegeRequest() {
+  }
+
+  public GrantRevokePrivilegeRequest(
+    GrantRevokeType requestType,
+    PrivilegeBag privileges)
+  {
+    this();
+    this.requestType = requestType;
+    this.privileges = privileges;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GrantRevokePrivilegeRequest(GrantRevokePrivilegeRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetRequestType()) {
+      this.requestType = other.requestType;
+    }
+    if (other.isSetPrivileges()) {
+      this.privileges = new PrivilegeBag(other.privileges);
+    }
+    this.revokeGrantOption = other.revokeGrantOption;
+  }
+
+  public GrantRevokePrivilegeRequest deepCopy() {
+    return new GrantRevokePrivilegeRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.requestType = null;
+    this.privileges = null;
+    setRevokeGrantOptionIsSet(false);
+    this.revokeGrantOption = false;
+  }
+
+  /**
+   * 
+   * @see GrantRevokeType
+   */
+  public GrantRevokeType getRequestType() {
+    return this.requestType;
+  }
+
+  /**
+   * 
+   * @see GrantRevokeType
+   */
+  public void setRequestType(GrantRevokeType requestType) {
+    this.requestType = requestType;
+  }
+
+  public void unsetRequestType() {
+    this.requestType = null;
+  }
+
+  /** Returns true if field requestType is set (has been assigned a value) and false otherwise */
+  public boolean isSetRequestType() {
+    return this.requestType != null;
+  }
+
+  public void setRequestTypeIsSet(boolean value) {
+    if (!value) {
+      this.requestType = null;
+    }
+  }
+
+  public PrivilegeBag getPrivileges() {
+    return this.privileges;
+  }
+
+  public void setPrivileges(PrivilegeBag privileges) {
+    this.privileges = privileges;
+  }
+
+  public void unsetPrivileges() {
+    this.privileges = null;
+  }
+
+  /** Returns true if field privileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrivileges() {
+    return this.privileges != null;
+  }
+
+  public void setPrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.privileges = null;
+    }
+  }
+
+  public boolean isRevokeGrantOption() {
+    return this.revokeGrantOption;
+  }
+
+  public void setRevokeGrantOption(boolean revokeGrantOption) {
+    this.revokeGrantOption = revokeGrantOption;
+    setRevokeGrantOptionIsSet(true);
+  }
+
+  public void unsetRevokeGrantOption() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REVOKEGRANTOPTION_ISSET_ID);
+  }
+
+  /** Returns true if field revokeGrantOption is set (has been assigned a value) and false otherwise */
+  public boolean isSetRevokeGrantOption() {
+    return EncodingUtils.testBit(__isset_bitfield, __REVOKEGRANTOPTION_ISSET_ID);
+  }
+
+  public void setRevokeGrantOptionIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REVOKEGRANTOPTION_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case REQUEST_TYPE:
+      if (value == null) {
+        unsetRequestType();
+      } else {
+        setRequestType((GrantRevokeType)value);
+      }
+      break;
+
+    case PRIVILEGES:
+      if (value == null) {
+        unsetPrivileges();
+      } else {
+        setPrivileges((PrivilegeBag)value);
+      }
+      break;
+
+    case REVOKE_GRANT_OPTION:
+      if (value == null) {
+        unsetRevokeGrantOption();
+      } else {
+        setRevokeGrantOption((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case REQUEST_TYPE:
+      return getRequestType();
+
+    case PRIVILEGES:
+      return getPrivileges();
+
+    case REVOKE_GRANT_OPTION:
+      return isRevokeGrantOption();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case REQUEST_TYPE:
+      return isSetRequestType();
+    case PRIVILEGES:
+      return isSetPrivileges();
+    case REVOKE_GRANT_OPTION:
+      return isSetRevokeGrantOption();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GrantRevokePrivilegeRequest)
+      return this.equals((GrantRevokePrivilegeRequest)that);
+    return false;
+  }
+
+  public boolean equals(GrantRevokePrivilegeRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_requestType = true && this.isSetRequestType();
+    boolean that_present_requestType = true && that.isSetRequestType();
+    if (this_present_requestType || that_present_requestType) {
+      if (!(this_present_requestType && that_present_requestType))
+        return false;
+      if (!this.requestType.equals(that.requestType))
+        return false;
+    }
+
+    boolean this_present_privileges = true && this.isSetPrivileges();
+    boolean that_present_privileges = true && that.isSetPrivileges();
+    if (this_present_privileges || that_present_privileges) {
+      if (!(this_present_privileges && that_present_privileges))
+        return false;
+      if (!this.privileges.equals(that.privileges))
+        return false;
+    }
+
+    boolean this_present_revokeGrantOption = true && this.isSetRevokeGrantOption();
+    boolean that_present_revokeGrantOption = true && that.isSetRevokeGrantOption();
+    if (this_present_revokeGrantOption || that_present_revokeGrantOption) {
+      if (!(this_present_revokeGrantOption && that_present_revokeGrantOption))
+        return false;
+      if (this.revokeGrantOption != that.revokeGrantOption)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_requestType = true && (isSetRequestType());
+    list.add(present_requestType);
+    if (present_requestType)
+      list.add(requestType.getValue());
+
+    boolean present_privileges = true && (isSetPrivileges());
+    list.add(present_privileges);
+    if (present_privileges)
+      list.add(privileges);
+
+    boolean present_revokeGrantOption = true && (isSetRevokeGrantOption());
+    list.add(present_revokeGrantOption);
+    if (present_revokeGrantOption)
+      list.add(revokeGrantOption);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GrantRevokePrivilegeRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetRequestType()).compareTo(other.isSetRequestType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRequestType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requestType, other.requestType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrivileges()).compareTo(other.isSetPrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privileges, other.privileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRevokeGrantOption()).compareTo(other.isSetRevokeGrantOption());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRevokeGrantOption()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.revokeGrantOption, other.revokeGrantOption);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GrantRevokePrivilegeRequest(");
+    boolean first = true;
+
+    sb.append("requestType:");
+    if (this.requestType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.requestType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("privileges:");
+    if (this.privileges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.privileges);
+    }
+    first = false;
+    if (isSetRevokeGrantOption()) {
+      if (!first) sb.append(", ");
+      sb.append("revokeGrantOption:");
+      sb.append(this.revokeGrantOption);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (privileges != null) {
+      privileges.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GrantRevokePrivilegeRequestStandardSchemeFactory implements SchemeFactory {
+    public GrantRevokePrivilegeRequestStandardScheme getScheme() {
+      return new GrantRevokePrivilegeRequestStandardScheme();
+    }
+  }
+
+  private static class GrantRevokePrivilegeRequestStandardScheme extends StandardScheme<GrantRevokePrivilegeRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GrantRevokePrivilegeRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // REQUEST_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.requestType = org.apache.hadoop.hive.metastore.api.GrantRevokeType.findByValue(iprot.readI32());
+              struct.setRequestTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.privileges = new PrivilegeBag();
+              struct.privileges.read(iprot);
+              struct.setPrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // REVOKE_GRANT_OPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.revokeGrantOption = iprot.readBool();
+              struct.setRevokeGrantOptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GrantRevokePrivilegeRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.requestType != null) {
+        oprot.writeFieldBegin(REQUEST_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.requestType.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.privileges != null) {
+        oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC);
+        struct.privileges.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetRevokeGrantOption()) {
+        oprot.writeFieldBegin(REVOKE_GRANT_OPTION_FIELD_DESC);
+        oprot.writeBool(struct.revokeGrantOption);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GrantRevokePrivilegeRequestTupleSchemeFactory implements SchemeFactory {
+    public GrantRevokePrivilegeRequestTupleScheme getScheme() {
+      return new GrantRevokePrivilegeRequestTupleScheme();
+    }
+  }
+
+  private static class GrantRevokePrivilegeRequestTupleScheme extends TupleScheme<GrantRevokePrivilegeRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GrantRevokePrivilegeRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetRequestType()) {
+        optionals.set(0);
+      }
+      if (struct.isSetPrivileges()) {
+        optionals.set(1);
+      }
+      if (struct.isSetRevokeGrantOption()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetRequestType()) {
+        oprot.writeI32(struct.requestType.getValue());
+      }
+      if (struct.isSetPrivileges()) {
+        struct.privileges.write(oprot);
+      }
+      if (struct.isSetRevokeGrantOption()) {
+        oprot.writeBool(struct.revokeGrantOption);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GrantRevokePrivilegeRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.requestType = org.apache.hadoop.hive.metastore.api.GrantRevokeType.findByValue(iprot.readI32());
+        struct.setRequestTypeIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.privileges = new PrivilegeBag();
+        struct.privileges.read(iprot);
+        struct.setPrivilegesIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.revokeGrantOption = iprot.readBool();
+        struct.setRevokeGrantOptionIsSet(true);
+      }
+    }
+  }
+
+}
+
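
For readers skimming the generated class above, here is a minimal round-trip
sketch. It is not part of this commit; it assumes the generated metastore
classes and libthrift 0.9.3 are on the classpath, and it leaves the
PrivilegeBag empty as a placeholder (a real caller would populate it with
HiveObjectPrivilege entries).

    import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest;
    import org.apache.hadoop.hive.metastore.api.GrantRevokeType;
    import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class GrantRevokePrivilegeRequestRoundTrip {
      public static void main(String[] args) throws Exception {
        // Build a request with the two required-by-convention fields.
        GrantRevokePrivilegeRequest request =
            new GrantRevokePrivilegeRequest(GrantRevokeType.GRANT, new PrivilegeBag());
        // Optional primitive field; presence is tracked in __isset_bitfield.
        request.setRevokeGrantOption(false);

        // Serialize into an in-memory transport with the same compact
        // protocol the generated writeObject()/readObject() methods use.
        TMemoryBuffer buffer = new TMemoryBuffer(256);
        request.write(new TCompactProtocol(buffer));

        // Read it back into a fresh instance and compare.
        GrantRevokePrivilegeRequest copy = new GrantRevokePrivilegeRequest();
        copy.read(new TCompactProtocol(buffer));
        System.out.println(copy);                  // generated toString()
        System.out.println(request.equals(copy));  // true: field-by-field equals()
      }
    }

The same write()/read() pair is what the metastore client and server invoke
through the scheme factories registered in the static initializer, so a
round trip through TMemoryBuffer exercises the standard-scheme code path.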

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java
new file mode 100644
index 0000000..dc51bac
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokePrivilegeResponse.java
@@ -0,0 +1,390 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GrantRevokePrivilegeResponse implements org.apache.thrift.TBase<GrantRevokePrivilegeResponse, GrantRevokePrivilegeResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GrantRevokePrivilegeResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokePrivilegeResponse");
+
+  private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GrantRevokePrivilegeResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GrantRevokePrivilegeResponseTupleSchemeFactory());
+  }
+
+  private boolean success; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SUCCESS((short)1, "success");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SUCCESS
+          return SUCCESS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __SUCCESS_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.SUCCESS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GrantRevokePrivilegeResponse.class, metaDataMap);
+  }
+
+  public GrantRevokePrivilegeResponse() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GrantRevokePrivilegeResponse(GrantRevokePrivilegeResponse other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.success = other.success;
+  }
+
+  public GrantRevokePrivilegeResponse deepCopy() {
+    return new GrantRevokePrivilegeResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    setSuccessIsSet(false);
+    this.success = false;
+  }
+
+  public boolean isSuccess() {
+    return this.success;
+  }
+
+  public void setSuccess(boolean success) {
+    this.success = success;
+    setSuccessIsSet(true);
+  }
+
+  public void unsetSuccess() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
+  }
+
+  /** Returns true if field success is set (has been assigned a value) and false otherwise */
+  public boolean isSetSuccess() {
+    return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID);
+  }
+
+  public void setSuccessIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SUCCESS:
+      if (value == null) {
+        unsetSuccess();
+      } else {
+        setSuccess((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SUCCESS:
+      return isSuccess();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SUCCESS:
+      return isSetSuccess();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GrantRevokePrivilegeResponse)
+      return this.equals((GrantRevokePrivilegeResponse)that);
+    return false;
+  }
+
+  public boolean equals(GrantRevokePrivilegeResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_success = true && this.isSetSuccess();
+    boolean that_present_success = true && that.isSetSuccess();
+    if (this_present_success || that_present_success) {
+      if (!(this_present_success && that_present_success))
+        return false;
+      if (this.success != that.success)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_success = true && (isSetSuccess());
+    list.add(present_success);
+    if (present_success)
+      list.add(success);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GrantRevokePrivilegeResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSuccess()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GrantRevokePrivilegeResponse(");
+    boolean first = true;
+
+    if (isSetSuccess()) {
+      sb.append("success:");
+      sb.append(this.success);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GrantRevokePrivilegeResponseStandardSchemeFactory implements SchemeFactory {
+    public GrantRevokePrivilegeResponseStandardScheme getScheme() {
+      return new GrantRevokePrivilegeResponseStandardScheme();
+    }
+  }
+
+  private static class GrantRevokePrivilegeResponseStandardScheme extends StandardScheme<GrantRevokePrivilegeResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GrantRevokePrivilegeResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SUCCESS
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.success = iprot.readBool();
+              struct.setSuccessIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GrantRevokePrivilegeResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.isSetSuccess()) {
+        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+        oprot.writeBool(struct.success);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GrantRevokePrivilegeResponseTupleSchemeFactory implements SchemeFactory {
+    public GrantRevokePrivilegeResponseTupleScheme getScheme() {
+      return new GrantRevokePrivilegeResponseTupleScheme();
+    }
+  }
+
+  private static class GrantRevokePrivilegeResponseTupleScheme extends TupleScheme<GrantRevokePrivilegeResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GrantRevokePrivilegeResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSuccess()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetSuccess()) {
+        oprot.writeBool(struct.success);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GrantRevokePrivilegeResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.success = iprot.readBool();
+        struct.setSuccessIsSet(true);
+      }
+    }
+  }
+
+}
+
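
One detail of the response class worth calling out: `success` is an optional
primitive, so presence is tracked via __isset_bitfield rather than a null
check. A hedged sketch (again not part of the commit, assuming the generated
class is on the classpath) showing the observable behavior:

    import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse;

    public class GrantRevokePrivilegeResponseDemo {
      public static void main(String[] args) {
        GrantRevokePrivilegeResponse response = new GrantRevokePrivilegeResponse();
        System.out.println(response.isSetSuccess()); // false: isset bit not set yet
        System.out.println(response);                // GrantRevokePrivilegeResponse()

        response.setSuccess(true);                   // also flips the isset bit
        System.out.println(response.isSetSuccess()); // true
        System.out.println(response);                // GrantRevokePrivilegeResponse(success:true)

        response.unsetSuccess();                     // clears only the bit
        System.out.println(response.isSuccess());    // still true, but the field
                                                     // now serializes as absent
      }
    }

Because the writers guard on isSetSuccess(), an unset `success` is omitted
from the wire entirely instead of being sent as false.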

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java
new file mode 100644
index 0000000..0de3fb6
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleRequest.java
@@ -0,0 +1,1059 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GrantRevokeRoleRequest implements org.apache.thrift.TBase<GrantRevokeRoleRequest, GrantRevokeRoleRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GrantRevokeRoleRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokeRoleRequest");
+
+  private static final org.apache.thrift.protocol.TField REQUEST_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("requestType", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField ROLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("roleName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("principalName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("principalType", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField GRANTOR_FIELD_DESC = new org.apache.thrift.protocol.TField("grantor", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField GRANTOR_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("grantorType", org.apache.thrift.protocol.TType.I32, (short)6);
+  private static final org.apache.thrift.protocol.TField GRANT_OPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("grantOption", org.apache.thrift.protocol.TType.BOOL, (short)7);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GrantRevokeRoleRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GrantRevokeRoleRequestTupleSchemeFactory());
+  }
+
+  private GrantRevokeType requestType; // required
+  private String roleName; // required
+  private String principalName; // required
+  private PrincipalType principalType; // required
+  private String grantor; // optional
+  private PrincipalType grantorType; // optional
+  private boolean grantOption; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    /**
+     * 
+     * @see GrantRevokeType
+     */
+    REQUEST_TYPE((short)1, "requestType"),
+    ROLE_NAME((short)2, "roleName"),
+    PRINCIPAL_NAME((short)3, "principalName"),
+    /**
+     * 
+     * @see PrincipalType
+     */
+    PRINCIPAL_TYPE((short)4, "principalType"),
+    GRANTOR((short)5, "grantor"),
+    /**
+     * 
+     * @see PrincipalType
+     */
+    GRANTOR_TYPE((short)6, "grantorType"),
+    GRANT_OPTION((short)7, "grantOption");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // REQUEST_TYPE
+          return REQUEST_TYPE;
+        case 2: // ROLE_NAME
+          return ROLE_NAME;
+        case 3: // PRINCIPAL_NAME
+          return PRINCIPAL_NAME;
+        case 4: // PRINCIPAL_TYPE
+          return PRINCIPAL_TYPE;
+        case 5: // GRANTOR
+          return GRANTOR;
+        case 6: // GRANTOR_TYPE
+          return GRANTOR_TYPE;
+        case 7: // GRANT_OPTION
+          return GRANT_OPTION;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __GRANTOPTION_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.GRANTOR,_Fields.GRANTOR_TYPE,_Fields.GRANT_OPTION};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.REQUEST_TYPE, new org.apache.thrift.meta_data.FieldMetaData("requestType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, GrantRevokeType.class)));
+    tmpMap.put(_Fields.ROLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("roleName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PRINCIPAL_NAME, new org.apache.thrift.meta_data.FieldMetaData("principalName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PRINCIPAL_TYPE, new org.apache.thrift.meta_data.FieldMetaData("principalType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+    tmpMap.put(_Fields.GRANTOR, new org.apache.thrift.meta_data.FieldMetaData("grantor", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.GRANTOR_TYPE, new org.apache.thrift.meta_data.FieldMetaData("grantorType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+    tmpMap.put(_Fields.GRANT_OPTION, new org.apache.thrift.meta_data.FieldMetaData("grantOption", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GrantRevokeRoleRequest.class, metaDataMap);
+  }
+
+  public GrantRevokeRoleRequest() {
+  }
+
+  public GrantRevokeRoleRequest(
+    GrantRevokeType requestType,
+    String roleName,
+    String principalName,
+    PrincipalType principalType)
+  {
+    this();
+    this.requestType = requestType;
+    this.roleName = roleName;
+    this.principalName = principalName;
+    this.principalType = principalType;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GrantRevokeRoleRequest(GrantRevokeRoleRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetRequestType()) {
+      this.requestType = other.requestType;
+    }
+    if (other.isSetRoleName()) {
+      this.roleName = other.roleName;
+    }
+    if (other.isSetPrincipalName()) {
+      this.principalName = other.principalName;
+    }
+    if (other.isSetPrincipalType()) {
+      this.principalType = other.principalType;
+    }
+    if (other.isSetGrantor()) {
+      this.grantor = other.grantor;
+    }
+    if (other.isSetGrantorType()) {
+      this.grantorType = other.grantorType;
+    }
+    this.grantOption = other.grantOption;
+  }
+
+  public GrantRevokeRoleRequest deepCopy() {
+    return new GrantRevokeRoleRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.requestType = null;
+    this.roleName = null;
+    this.principalName = null;
+    this.principalType = null;
+    this.grantor = null;
+    this.grantorType = null;
+    setGrantOptionIsSet(false);
+    this.grantOption = false;
+  }
+
+  /**
+   * 
+   * @see GrantRevokeType
+   */
+  public GrantRevokeType getRequestType() {
+    return this.requestType;
+  }
+
+  /**
+   * 
+   * @see GrantRevokeType
+   */
+  public void setRequestType(GrantRevokeType requestType) {
+    this.requestType = requestType;
+  }
+
+  public void unsetRequestType() {
+    this.requestType = null;
+  }
+
+  /** Returns true if field requestType is set (has been assigned a value) and false otherwise */
+  public boolean isSetRequestType() {
+    return this.requestType != null;
+  }
+
+  public void setRequestTypeIsSet(boolean value) {
+    if (!value) {
+      this.requestType = null;
+    }
+  }
+
+  public String getRoleName() {
+    return this.roleName;
+  }
+
+  public void setRoleName(String roleName) {
+    this.roleName = roleName;
+  }
+
+  public void unsetRoleName() {
+    this.roleName = null;
+  }
+
+  /** Returns true if field roleName is set (has been assigned a value) and false otherwise */
+  public boolean isSetRoleName() {
+    return this.roleName != null;
+  }
+
+  public void setRoleNameIsSet(boolean value) {
+    if (!value) {
+      this.roleName = null;
+    }
+  }
+
+  public String getPrincipalName() {
+    return this.principalName;
+  }
+
+  public void setPrincipalName(String principalName) {
+    this.principalName = principalName;
+  }
+
+  public void unsetPrincipalName() {
+    this.principalName = null;
+  }
+
+  /** Returns true if field principalName is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipalName() {
+    return this.principalName != null;
+  }
+
+  public void setPrincipalNameIsSet(boolean value) {
+    if (!value) {
+      this.principalName = null;
+    }
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public PrincipalType getPrincipalType() {
+    return this.principalType;
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public void setPrincipalType(PrincipalType principalType) {
+    this.principalType = principalType;
+  }
+
+  public void unsetPrincipalType() {
+    this.principalType = null;
+  }
+
+  /** Returns true if field principalType is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipalType() {
+    return this.principalType != null;
+  }
+
+  public void setPrincipalTypeIsSet(boolean value) {
+    if (!value) {
+      this.principalType = null;
+    }
+  }
+
+  public String getGrantor() {
+    return this.grantor;
+  }
+
+  public void setGrantor(String grantor) {
+    this.grantor = grantor;
+  }
+
+  public void unsetGrantor() {
+    this.grantor = null;
+  }
+
+  /** Returns true if field grantor is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantor() {
+    return this.grantor != null;
+  }
+
+  public void setGrantorIsSet(boolean value) {
+    if (!value) {
+      this.grantor = null;
+    }
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public PrincipalType getGrantorType() {
+    return this.grantorType;
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public void setGrantorType(PrincipalType grantorType) {
+    this.grantorType = grantorType;
+  }
+
+  public void unsetGrantorType() {
+    this.grantorType = null;
+  }
+
+  /** Returns true if field grantorType is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantorType() {
+    return this.grantorType != null;
+  }
+
+  public void setGrantorTypeIsSet(boolean value) {
+    if (!value) {
+      this.grantorType = null;
+    }
+  }
+
+  public boolean isGrantOption() {
+    return this.grantOption;
+  }
+
+  public void setGrantOption(boolean grantOption) {
+    this.grantOption = grantOption;
+    setGrantOptionIsSet(true);
+  }
+
+  public void unsetGrantOption() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __GRANTOPTION_ISSET_ID);
+  }
+
+  /** Returns true if field grantOption is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantOption() {
+    return EncodingUtils.testBit(__isset_bitfield, __GRANTOPTION_ISSET_ID);
+  }
+
+  public void setGrantOptionIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __GRANTOPTION_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case REQUEST_TYPE:
+      if (value == null) {
+        unsetRequestType();
+      } else {
+        setRequestType((GrantRevokeType)value);
+      }
+      break;
+
+    case ROLE_NAME:
+      if (value == null) {
+        unsetRoleName();
+      } else {
+        setRoleName((String)value);
+      }
+      break;
+
+    case PRINCIPAL_NAME:
+      if (value == null) {
+        unsetPrincipalName();
+      } else {
+        setPrincipalName((String)value);
+      }
+      break;
+
+    case PRINCIPAL_TYPE:
+      if (value == null) {
+        unsetPrincipalType();
+      } else {
+        setPrincipalType((PrincipalType)value);
+      }
+      break;
+
+    case GRANTOR:
+      if (value == null) {
+        unsetGrantor();
+      } else {
+        setGrantor((String)value);
+      }
+      break;
+
+    case GRANTOR_TYPE:
+      if (value == null) {
+        unsetGrantorType();
+      } else {
+        setGrantorType((PrincipalType)value);
+      }
+      break;
+
+    case GRANT_OPTION:
+      if (value == null) {
+        unsetGrantOption();
+      } else {
+        setGrantOption((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case REQUEST_TYPE:
+      return getRequestType();
+
+    case ROLE_NAME:
+      return getRoleName();
+
+    case PRINCIPAL_NAME:
+      return getPrincipalName();
+
+    case PRINCIPAL_TYPE:
+      return getPrincipalType();
+
+    case GRANTOR:
+      return getGrantor();
+
+    case GRANTOR_TYPE:
+      return getGrantorType();
+
+    case GRANT_OPTION:
+      return isGrantOption();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case REQUEST_TYPE:
+      return isSetRequestType();
+    case ROLE_NAME:
+      return isSetRoleName();
+    case PRINCIPAL_NAME:
+      return isSetPrincipalName();
+    case PRINCIPAL_TYPE:
+      return isSetPrincipalType();
+    case GRANTOR:
+      return isSetGrantor();
+    case GRANTOR_TYPE:
+      return isSetGrantorType();
+    case GRANT_OPTION:
+      return isSetGrantOption();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GrantRevokeRoleRequest)
+      return this.equals((GrantRevokeRoleRequest)that);
+    return false;
+  }
+
+  public boolean equals(GrantRevokeRoleRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_requestType = true && this.isSetRequestType();
+    boolean that_present_requestType = true && that.isSetRequestType();
+    if (this_present_requestType || that_present_requestType) {
+      if (!(this_present_requestType && that_present_requestType))
+        return false;
+      if (!this.requestType.equals(that.requestType))
+        return false;
+    }
+
+    boolean this_present_roleName = true && this.isSetRoleName();
+    boolean that_present_roleName = true && that.isSetRoleName();
+    if (this_present_roleName || that_present_roleName) {
+      if (!(this_present_roleName && that_present_roleName))
+        return false;
+      if (!this.roleName.equals(that.roleName))
+        return false;
+    }
+
+    boolean this_present_principalName = true && this.isSetPrincipalName();
+    boolean that_present_principalName = true && that.isSetPrincipalName();
+    if (this_present_principalName || that_present_principalName) {
+      if (!(this_present_principalName && that_present_principalName))
+        return false;
+      if (!this.principalName.equals(that.principalName))
+        return false;
+    }
+
+    boolean this_present_principalType = true && this.isSetPrincipalType();
+    boolean that_present_principalType = true && that.isSetPrincipalType();
+    if (this_present_principalType || that_present_principalType) {
+      if (!(this_present_principalType && that_present_principalType))
+        return false;
+      if (!this.principalType.equals(that.principalType))
+        return false;
+    }
+
+    boolean this_present_grantor = true && this.isSetGrantor();
+    boolean that_present_grantor = true && that.isSetGrantor();
+    if (this_present_grantor || that_present_grantor) {
+      if (!(this_present_grantor && that_present_grantor))
+        return false;
+      if (!this.grantor.equals(that.grantor))
+        return false;
+    }
+
+    boolean this_present_grantorType = true && this.isSetGrantorType();
+    boolean that_present_grantorType = true && that.isSetGrantorType();
+    if (this_present_grantorType || that_present_grantorType) {
+      if (!(this_present_grantorType && that_present_grantorType))
+        return false;
+      if (!this.grantorType.equals(that.grantorType))
+        return false;
+    }
+
+    boolean this_present_grantOption = true && this.isSetGrantOption();
+    boolean that_present_grantOption = true && that.isSetGrantOption();
+    if (this_present_grantOption || that_present_grantOption) {
+      if (!(this_present_grantOption && that_present_grantOption))
+        return false;
+      if (this.grantOption != that.grantOption)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_requestType = true && (isSetRequestType());
+    list.add(present_requestType);
+    if (present_requestType)
+      list.add(requestType.getValue());
+
+    boolean present_roleName = true && (isSetRoleName());
+    list.add(present_roleName);
+    if (present_roleName)
+      list.add(roleName);
+
+    boolean present_principalName = true && (isSetPrincipalName());
+    list.add(present_principalName);
+    if (present_principalName)
+      list.add(principalName);
+
+    boolean present_principalType = true && (isSetPrincipalType());
+    list.add(present_principalType);
+    if (present_principalType)
+      list.add(principalType.getValue());
+
+    boolean present_grantor = true && (isSetGrantor());
+    list.add(present_grantor);
+    if (present_grantor)
+      list.add(grantor);
+
+    boolean present_grantorType = true && (isSetGrantorType());
+    list.add(present_grantorType);
+    if (present_grantorType)
+      list.add(grantorType.getValue());
+
+    boolean present_grantOption = true && (isSetGrantOption());
+    list.add(present_grantOption);
+    if (present_grantOption)
+      list.add(grantOption);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GrantRevokeRoleRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetRequestType()).compareTo(other.isSetRequestType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRequestType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requestType, other.requestType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRoleName()).compareTo(other.isSetRoleName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRoleName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.roleName, other.roleName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrincipalName()).compareTo(other.isSetPrincipalName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipalName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principalName, other.principalName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrincipalType()).compareTo(other.isSetPrincipalType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipalType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principalType, other.principalType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantor()).compareTo(other.isSetGrantor());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantor()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantor, other.grantor);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantorType()).compareTo(other.isSetGrantorType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantorType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantorType, other.grantorType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantOption()).compareTo(other.isSetGrantOption());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantOption()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantOption, other.grantOption);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GrantRevokeRoleRequest(");
+    boolean first = true;
+
+    sb.append("requestType:");
+    if (this.requestType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.requestType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("roleName:");
+    if (this.roleName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.roleName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("principalName:");
+    if (this.principalName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principalName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("principalType:");
+    if (this.principalType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principalType);
+    }
+    first = false;
+    if (isSetGrantor()) {
+      if (!first) sb.append(", ");
+      sb.append("grantor:");
+      if (this.grantor == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.grantor);
+      }
+      first = false;
+    }
+    if (isSetGrantorType()) {
+      if (!first) sb.append(", ");
+      sb.append("grantorType:");
+      if (this.grantorType == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.grantorType);
+      }
+      first = false;
+    }
+    if (isSetGrantOption()) {
+      if (!first) sb.append(", ");
+      sb.append("grantOption:");
+      sb.append(this.grantOption);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but Java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GrantRevokeRoleRequestStandardSchemeFactory implements SchemeFactory {
+    public GrantRevokeRoleRequestStandardScheme getScheme() {
+      return new GrantRevokeRoleRequestStandardScheme();
+    }
+  }
+
+  private static class GrantRevokeRoleRequestStandardScheme extends StandardScheme<GrantRevokeRoleRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GrantRevokeRoleRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // REQUEST_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.requestType = org.apache.hadoop.hive.metastore.api.GrantRevokeType.findByValue(iprot.readI32());
+              struct.setRequestTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // ROLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.roleName = iprot.readString();
+              struct.setRoleNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PRINCIPAL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.principalName = iprot.readString();
+              struct.setPrincipalNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // PRINCIPAL_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.principalType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+              struct.setPrincipalTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // GRANTOR
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.grantor = iprot.readString();
+              struct.setGrantorIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // GRANTOR_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.grantorType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+              struct.setGrantorTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // GRANT_OPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.grantOption = iprot.readBool();
+              struct.setGrantOptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GrantRevokeRoleRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.requestType != null) {
+        oprot.writeFieldBegin(REQUEST_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.requestType.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.roleName != null) {
+        oprot.writeFieldBegin(ROLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.roleName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.principalName != null) {
+        oprot.writeFieldBegin(PRINCIPAL_NAME_FIELD_DESC);
+        oprot.writeString(struct.principalName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.principalType != null) {
+        oprot.writeFieldBegin(PRINCIPAL_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.principalType.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.grantor != null) {
+        if (struct.isSetGrantor()) {
+          oprot.writeFieldBegin(GRANTOR_FIELD_DESC);
+          oprot.writeString(struct.grantor);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.grantorType != null) {
+        if (struct.isSetGrantorType()) {
+          oprot.writeFieldBegin(GRANTOR_TYPE_FIELD_DESC);
+          oprot.writeI32(struct.grantorType.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetGrantOption()) {
+        oprot.writeFieldBegin(GRANT_OPTION_FIELD_DESC);
+        oprot.writeBool(struct.grantOption);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GrantRevokeRoleRequestTupleSchemeFactory implements SchemeFactory {
+    public GrantRevokeRoleRequestTupleScheme getScheme() {
+      return new GrantRevokeRoleRequestTupleScheme();
+    }
+  }
+
+  private static class GrantRevokeRoleRequestTupleScheme extends TupleScheme<GrantRevokeRoleRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GrantRevokeRoleRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetRequestType()) {
+        optionals.set(0);
+      }
+      if (struct.isSetRoleName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetPrincipalName()) {
+        optionals.set(2);
+      }
+      if (struct.isSetPrincipalType()) {
+        optionals.set(3);
+      }
+      if (struct.isSetGrantor()) {
+        optionals.set(4);
+      }
+      if (struct.isSetGrantorType()) {
+        optionals.set(5);
+      }
+      if (struct.isSetGrantOption()) {
+        optionals.set(6);
+      }
+      oprot.writeBitSet(optionals, 7);
+      if (struct.isSetRequestType()) {
+        oprot.writeI32(struct.requestType.getValue());
+      }
+      if (struct.isSetRoleName()) {
+        oprot.writeString(struct.roleName);
+      }
+      if (struct.isSetPrincipalName()) {
+        oprot.writeString(struct.principalName);
+      }
+      if (struct.isSetPrincipalType()) {
+        oprot.writeI32(struct.principalType.getValue());
+      }
+      if (struct.isSetGrantor()) {
+        oprot.writeString(struct.grantor);
+      }
+      if (struct.isSetGrantorType()) {
+        oprot.writeI32(struct.grantorType.getValue());
+      }
+      if (struct.isSetGrantOption()) {
+        oprot.writeBool(struct.grantOption);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GrantRevokeRoleRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(7);
+      if (incoming.get(0)) {
+        struct.requestType = org.apache.hadoop.hive.metastore.api.GrantRevokeType.findByValue(iprot.readI32());
+        struct.setRequestTypeIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.roleName = iprot.readString();
+        struct.setRoleNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.principalName = iprot.readString();
+        struct.setPrincipalNameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.principalType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+        struct.setPrincipalTypeIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.grantor = iprot.readString();
+        struct.setGrantorIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.grantorType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+        struct.setGrantorTypeIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.grantOption = iprot.readBool();
+        struct.setGrantOptionIsSet(true);
+      }
+    }
+  }
+
+}
+
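
For orientation, here is a round-trip of the GrantRevokeRoleRequest generated above. This is a sketch, not part of the commit: it assumes libthrift 0.9.3 and the generated classes are on the classpath, and the role and principal names are invented.

import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest;
import org.apache.hadoop.hive.metastore.api.GrantRevokeType;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class GrantRevokeRoleRequestRoundTrip {
  public static void main(String[] args) throws TException {
    GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
    req.setRequestType(GrantRevokeType.GRANT); // thrift field 1
    req.setRoleName("analysts");               // thrift field 2 (invented name)
    req.setPrincipalName("alice");             // thrift field 3 (invented name)
    req.setPrincipalType(PrincipalType.USER);  // thrift field 4
    req.setGrantOption(true);                  // thrift field 7; presence tracked in __isset_bitfield

    // TCompactProtocol reports StandardScheme from getScheme(), so write()
    // dispatches to GrantRevokeRoleRequestStandardScheme above.
    byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(req);

    GrantRevokeRoleRequest copy = new GrantRevokeRoleRequest();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);
    System.out.println(copy); // printed by the generated toString() above
  }
}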

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java
new file mode 100644
index 0000000..0602f1d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeRoleResponse.java
@@ -0,0 +1,390 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GrantRevokeRoleResponse implements org.apache.thrift.TBase<GrantRevokeRoleResponse, GrantRevokeRoleResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GrantRevokeRoleResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GrantRevokeRoleResponse");
+
+  private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GrantRevokeRoleResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GrantRevokeRoleResponseTupleSchemeFactory());
+  }
+
+  private boolean success; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SUCCESS((short)1, "success");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SUCCESS
+          return SUCCESS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __SUCCESS_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.SUCCESS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GrantRevokeRoleResponse.class, metaDataMap);
+  }
+
+  public GrantRevokeRoleResponse() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GrantRevokeRoleResponse(GrantRevokeRoleResponse other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.success = other.success;
+  }
+
+  public GrantRevokeRoleResponse deepCopy() {
+    return new GrantRevokeRoleResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    setSuccessIsSet(false);
+    this.success = false;
+  }
+
+  public boolean isSuccess() {
+    return this.success;
+  }
+
+  public void setSuccess(boolean success) {
+    this.success = success;
+    setSuccessIsSet(true);
+  }
+
+  public void unsetSuccess() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID);
+  }
+
+  /** Returns true if field success is set (has been assigned a value) and false otherwise */
+  public boolean isSetSuccess() {
+    return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID);
+  }
+
+  public void setSuccessIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SUCCESS:
+      if (value == null) {
+        unsetSuccess();
+      } else {
+        setSuccess((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SUCCESS:
+      return isSuccess();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SUCCESS:
+      return isSetSuccess();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GrantRevokeRoleResponse)
+      return this.equals((GrantRevokeRoleResponse)that);
+    return false;
+  }
+
+  public boolean equals(GrantRevokeRoleResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_success = true && this.isSetSuccess();
+    boolean that_present_success = true && that.isSetSuccess();
+    if (this_present_success || that_present_success) {
+      if (!(this_present_success && that_present_success))
+        return false;
+      if (this.success != that.success)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_success = true && (isSetSuccess());
+    list.add(present_success);
+    if (present_success)
+      list.add(success);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GrantRevokeRoleResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSuccess()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GrantRevokeRoleResponse(");
+    boolean first = true;
+
+    if (isSetSuccess()) {
+      sb.append("success:");
+      sb.append(this.success);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but Java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GrantRevokeRoleResponseStandardSchemeFactory implements SchemeFactory {
+    public GrantRevokeRoleResponseStandardScheme getScheme() {
+      return new GrantRevokeRoleResponseStandardScheme();
+    }
+  }
+
+  private static class GrantRevokeRoleResponseStandardScheme extends StandardScheme<GrantRevokeRoleResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GrantRevokeRoleResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SUCCESS
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.success = iprot.readBool();
+              struct.setSuccessIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GrantRevokeRoleResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.isSetSuccess()) {
+        oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+        oprot.writeBool(struct.success);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GrantRevokeRoleResponseTupleSchemeFactory implements SchemeFactory {
+    public GrantRevokeRoleResponseTupleScheme getScheme() {
+      return new GrantRevokeRoleResponseTupleScheme();
+    }
+  }
+
+  private static class GrantRevokeRoleResponseTupleScheme extends TupleScheme<GrantRevokeRoleResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GrantRevokeRoleResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSuccess()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetSuccess()) {
+        oprot.writeBool(struct.success);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GrantRevokeRoleResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.success = iprot.readBool();
+        struct.setSuccessIsSet(true);
+      }
+    }
+  }
+
+}
+
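
The single-field response struct makes the difference between the two generated schemes easy to see. A sketch comparing them, under the same libthrift 0.9.3 assumption as above (byte counts are indicative only):

import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.protocol.TTupleProtocol;

public class GrantRevokeRoleResponseSchemes {
  public static void main(String[] args) throws TException {
    GrantRevokeRoleResponse resp = new GrantRevokeRoleResponse();
    resp.setSuccess(true); // flips __SUCCESS_ISSET_ID in __isset_bitfield

    // TTupleProtocol.getScheme() selects the TupleScheme above: a one-entry
    // presence BitSet is written, followed only by the fields that are set.
    byte[] tuple = new TSerializer(new TTupleProtocol.Factory()).serialize(resp);

    // Any other protocol selects the StandardScheme: tagged fields plus a
    // stop marker, self-describing but typically a little larger.
    byte[] standard = new TSerializer(new TCompactProtocol.Factory()).serialize(resp);

    System.out.println("tuple=" + tuple.length + "B standard=" + standard.length + "B");
  }
}

The tuple encoding is compact precisely because it is not self-describing, so both peers must have been generated from the same field list; that is why it is opt-in via the protocol choice.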

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeType.java
new file mode 100644
index 0000000..ac65ec7
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GrantRevokeType.java
@@ -0,0 +1,45 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum GrantRevokeType implements org.apache.thrift.TEnum {
+  GRANT(1),
+  REVOKE(2);
+
+  private final int value;
+
+  private GrantRevokeType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static GrantRevokeType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return GRANT;
+      case 2:
+        return REVOKE;
+      default:
+        return null;
+    }
+  }
+}
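
Note that findByValue() returns null rather than throwing for an unrecognized wire value; this is how the generated readers above cope with enum values added in newer IDL revisions. A tiny sketch:

import org.apache.hadoop.hive.metastore.api.GrantRevokeType;

public class GrantRevokeTypeDemo {
  public static void main(String[] args) {
    GrantRevokeType revoke = GrantRevokeType.findByValue(2);  // REVOKE
    GrantRevokeType future = GrantRevokeType.findByValue(99); // null, not an exception
    if (future == null) {
      System.out.println("unknown GrantRevokeType value; treat the field as unset");
    }
    System.out.println(revoke + " has wire value " + revoke.getValue());
  }
}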


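The next generated file, PartitionValuesRequest, carries the parameters for listing partition values. A usage sketch, with the same classpath assumptions as above and invented table and column names; note that the required dbName/tblName/partitionKeys fields are enforced by validate() at write time:

import java.util.Collections;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class PartitionValuesRequestDemo {
  public static void main(String[] args) throws TException {
    FieldSchema ds = new FieldSchema("ds", "string", null); // one partition column
    PartitionValuesRequest req = new PartitionValuesRequest(
        "default", "web_logs", Collections.singletonList(ds));
    // Optional fields keep their IDL defaults (applyDistinct=true,
    // ascending=true, maxParts=-1) until explicitly set:
    req.setMaxParts(100L);

    // write() calls validate() first, so leaving a required field unset
    // would fail here with a TProtocolException.
    byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(req);
    System.out.println("serialized " + wire.length + " bytes: " + req);
  }
}
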
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java
new file mode 100644
index 0000000..c8707ca
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRequest.java
@@ -0,0 +1,1328 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionValuesRequest implements org.apache.thrift.TBase<PartitionValuesRequest, PartitionValuesRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionValuesRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionValuesRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PARTITION_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionKeys", org.apache.thrift.protocol.TType.LIST, (short)3);
+  private static final org.apache.thrift.protocol.TField APPLY_DISTINCT_FIELD_DESC = new org.apache.thrift.protocol.TField("applyDistinct", org.apache.thrift.protocol.TType.BOOL, (short)4);
+  private static final org.apache.thrift.protocol.TField FILTER_FIELD_DESC = new org.apache.thrift.protocol.TField("filter", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField PARTITION_ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionOrder", org.apache.thrift.protocol.TType.LIST, (short)6);
+  private static final org.apache.thrift.protocol.TField ASCENDING_FIELD_DESC = new org.apache.thrift.protocol.TField("ascending", org.apache.thrift.protocol.TType.BOOL, (short)7);
+  private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I64, (short)8);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionValuesRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionValuesRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tblName; // required
+  private List<FieldSchema> partitionKeys; // required
+  private boolean applyDistinct; // optional
+  private String filter; // optional
+  private List<FieldSchema> partitionOrder; // optional
+  private boolean ascending; // optional
+  private long maxParts; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TBL_NAME((short)2, "tblName"),
+    PARTITION_KEYS((short)3, "partitionKeys"),
+    APPLY_DISTINCT((short)4, "applyDistinct"),
+    FILTER((short)5, "filter"),
+    PARTITION_ORDER((short)6, "partitionOrder"),
+    ASCENDING((short)7, "ascending"),
+    MAX_PARTS((short)8, "maxParts"),
+    CAT_NAME((short)9, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // PARTITION_KEYS
+          return PARTITION_KEYS;
+        case 4: // APPLY_DISTINCT
+          return APPLY_DISTINCT;
+        case 5: // FILTER
+          return FILTER;
+        case 6: // PARTITION_ORDER
+          return PARTITION_ORDER;
+        case 7: // ASCENDING
+          return ASCENDING;
+        case 8: // MAX_PARTS
+          return MAX_PARTS;
+        case 9: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __APPLYDISTINCT_ISSET_ID = 0;
+  private static final int __ASCENDING_ISSET_ID = 1;
+  private static final int __MAXPARTS_ISSET_ID = 2;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.APPLY_DISTINCT,_Fields.FILTER,_Fields.PARTITION_ORDER,_Fields.ASCENDING,_Fields.MAX_PARTS,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARTITION_KEYS, new org.apache.thrift.meta_data.FieldMetaData("partitionKeys", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class))));
+    tmpMap.put(_Fields.APPLY_DISTINCT, new org.apache.thrift.meta_data.FieldMetaData("applyDistinct", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.FILTER, new org.apache.thrift.meta_data.FieldMetaData("filter", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARTITION_ORDER, new org.apache.thrift.meta_data.FieldMetaData("partitionOrder", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class))));
+    tmpMap.put(_Fields.ASCENDING, new org.apache.thrift.meta_data.FieldMetaData("ascending", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("maxParts", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionValuesRequest.class, metaDataMap);
+  }
+
+  public PartitionValuesRequest() {
+    this.applyDistinct = true;
+
+    this.ascending = true;
+
+    this.maxParts = -1L;
+
+  }
+
+  public PartitionValuesRequest(
+    String dbName,
+    String tblName,
+    List<FieldSchema> partitionKeys)
+  {
+    this();
+    this.dbName = dbName;
+    this.tblName = tblName;
+    this.partitionKeys = partitionKeys;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionValuesRequest(PartitionValuesRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTblName()) {
+      this.tblName = other.tblName;
+    }
+    if (other.isSetPartitionKeys()) {
+      List<FieldSchema> __this__partitionKeys = new ArrayList<FieldSchema>(other.partitionKeys.size());
+      for (FieldSchema other_element : other.partitionKeys) {
+        __this__partitionKeys.add(new FieldSchema(other_element));
+      }
+      this.partitionKeys = __this__partitionKeys;
+    }
+    this.applyDistinct = other.applyDistinct;
+    if (other.isSetFilter()) {
+      this.filter = other.filter;
+    }
+    if (other.isSetPartitionOrder()) {
+      List<FieldSchema> __this__partitionOrder = new ArrayList<FieldSchema>(other.partitionOrder.size());
+      for (FieldSchema other_element : other.partitionOrder) {
+        __this__partitionOrder.add(new FieldSchema(other_element));
+      }
+      this.partitionOrder = __this__partitionOrder;
+    }
+    this.ascending = other.ascending;
+    this.maxParts = other.maxParts;
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public PartitionValuesRequest deepCopy() {
+    return new PartitionValuesRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tblName = null;
+    this.partitionKeys = null;
+    this.applyDistinct = true;
+
+    this.filter = null;
+    this.partitionOrder = null;
+    this.ascending = true;
+
+    this.maxParts = -1L;
+
+    this.catName = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTblName() {
+    return this.tblName;
+  }
+
+  public void setTblName(String tblName) {
+    this.tblName = tblName;
+  }
+
+  public void unsetTblName() {
+    this.tblName = null;
+  }
+
+  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblName() {
+    return this.tblName != null;
+  }
+
+  public void setTblNameIsSet(boolean value) {
+    if (!value) {
+      this.tblName = null;
+    }
+  }
+
+  public int getPartitionKeysSize() {
+    return (this.partitionKeys == null) ? 0 : this.partitionKeys.size();
+  }
+
+  public java.util.Iterator<FieldSchema> getPartitionKeysIterator() {
+    return (this.partitionKeys == null) ? null : this.partitionKeys.iterator();
+  }
+
+  public void addToPartitionKeys(FieldSchema elem) {
+    if (this.partitionKeys == null) {
+      this.partitionKeys = new ArrayList<FieldSchema>();
+    }
+    this.partitionKeys.add(elem);
+  }
+
+  public List<FieldSchema> getPartitionKeys() {
+    return this.partitionKeys;
+  }
+
+  public void setPartitionKeys(List<FieldSchema> partitionKeys) {
+    this.partitionKeys = partitionKeys;
+  }
+
+  public void unsetPartitionKeys() {
+    this.partitionKeys = null;
+  }
+
+  /** Returns true if field partitionKeys is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitionKeys() {
+    return this.partitionKeys != null;
+  }
+
+  public void setPartitionKeysIsSet(boolean value) {
+    if (!value) {
+      this.partitionKeys = null;
+    }
+  }
+
+  public boolean isApplyDistinct() {
+    return this.applyDistinct;
+  }
+
+  public void setApplyDistinct(boolean applyDistinct) {
+    this.applyDistinct = applyDistinct;
+    setApplyDistinctIsSet(true);
+  }
+
+  public void unsetApplyDistinct() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __APPLYDISTINCT_ISSET_ID);
+  }
+
+  /** Returns true if field applyDistinct is set (has been assigned a value) and false otherwise */
+  public boolean isSetApplyDistinct() {
+    return EncodingUtils.testBit(__isset_bitfield, __APPLYDISTINCT_ISSET_ID);
+  }
+
+  public void setApplyDistinctIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __APPLYDISTINCT_ISSET_ID, value);
+  }
+
+  public String getFilter() {
+    return this.filter;
+  }
+
+  public void setFilter(String filter) {
+    this.filter = filter;
+  }
+
+  public void unsetFilter() {
+    this.filter = null;
+  }
+
+  /** Returns true if field filter is set (has been assigned a value) and false otherwise */
+  public boolean isSetFilter() {
+    return this.filter != null;
+  }
+
+  public void setFilterIsSet(boolean value) {
+    if (!value) {
+      this.filter = null;
+    }
+  }
+
+  public int getPartitionOrderSize() {
+    return (this.partitionOrder == null) ? 0 : this.partitionOrder.size();
+  }
+
+  public java.util.Iterator<FieldSchema> getPartitionOrderIterator() {
+    return (this.partitionOrder == null) ? null : this.partitionOrder.iterator();
+  }
+
+  public void addToPartitionOrder(FieldSchema elem) {
+    if (this.partitionOrder == null) {
+      this.partitionOrder = new ArrayList<FieldSchema>();
+    }
+    this.partitionOrder.add(elem);
+  }
+
+  public List<FieldSchema> getPartitionOrder() {
+    return this.partitionOrder;
+  }
+
+  public void setPartitionOrder(List<FieldSchema> partitionOrder) {
+    this.partitionOrder = partitionOrder;
+  }
+
+  public void unsetPartitionOrder() {
+    this.partitionOrder = null;
+  }
+
+  /** Returns true if field partitionOrder is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitionOrder() {
+    return this.partitionOrder != null;
+  }
+
+  public void setPartitionOrderIsSet(boolean value) {
+    if (!value) {
+      this.partitionOrder = null;
+    }
+  }
+
+  public boolean isAscending() {
+    return this.ascending;
+  }
+
+  public void setAscending(boolean ascending) {
+    this.ascending = ascending;
+    setAscendingIsSet(true);
+  }
+
+  public void unsetAscending() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ASCENDING_ISSET_ID);
+  }
+
+  /** Returns true if field ascending is set (has been assigned a value) and false otherwise */
+  public boolean isSetAscending() {
+    return EncodingUtils.testBit(__isset_bitfield, __ASCENDING_ISSET_ID);
+  }
+
+  public void setAscendingIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ASCENDING_ISSET_ID, value);
+  }
+
+  public long getMaxParts() {
+    return this.maxParts;
+  }
+
+  public void setMaxParts(long maxParts) {
+    this.maxParts = maxParts;
+    setMaxPartsIsSet(true);
+  }
+
+  public void unsetMaxParts() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXPARTS_ISSET_ID);
+  }
+
+  /** Returns true if field maxParts is set (has been assigned a value) and false otherwise */
+  public boolean isSetMaxParts() {
+    return EncodingUtils.testBit(__isset_bitfield, __MAXPARTS_ISSET_ID);
+  }
+
+  public void setMaxPartsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXPARTS_ISSET_ID, value);
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTblName();
+      } else {
+        setTblName((String)value);
+      }
+      break;
+
+    case PARTITION_KEYS:
+      if (value == null) {
+        unsetPartitionKeys();
+      } else {
+        setPartitionKeys((List<FieldSchema>)value);
+      }
+      break;
+
+    case APPLY_DISTINCT:
+      if (value == null) {
+        unsetApplyDistinct();
+      } else {
+        setApplyDistinct((Boolean)value);
+      }
+      break;
+
+    case FILTER:
+      if (value == null) {
+        unsetFilter();
+      } else {
+        setFilter((String)value);
+      }
+      break;
+
+    case PARTITION_ORDER:
+      if (value == null) {
+        unsetPartitionOrder();
+      } else {
+        setPartitionOrder((List<FieldSchema>)value);
+      }
+      break;
+
+    case ASCENDING:
+      if (value == null) {
+        unsetAscending();
+      } else {
+        setAscending((Boolean)value);
+      }
+      break;
+
+    case MAX_PARTS:
+      if (value == null) {
+        unsetMaxParts();
+      } else {
+        setMaxParts((Long)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TBL_NAME:
+      return getTblName();
+
+    case PARTITION_KEYS:
+      return getPartitionKeys();
+
+    case APPLY_DISTINCT:
+      return isApplyDistinct();
+
+    case FILTER:
+      return getFilter();
+
+    case PARTITION_ORDER:
+      return getPartitionOrder();
+
+    case ASCENDING:
+      return isAscending();
+
+    case MAX_PARTS:
+      return getMaxParts();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TBL_NAME:
+      return isSetTblName();
+    case PARTITION_KEYS:
+      return isSetPartitionKeys();
+    case APPLY_DISTINCT:
+      return isSetApplyDistinct();
+    case FILTER:
+      return isSetFilter();
+    case PARTITION_ORDER:
+      return isSetPartitionOrder();
+    case ASCENDING:
+      return isSetAscending();
+    case MAX_PARTS:
+      return isSetMaxParts();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionValuesRequest)
+      return this.equals((PartitionValuesRequest)that);
+    return false;
+  }
+
+  public boolean equals(PartitionValuesRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tblName = true && this.isSetTblName();
+    boolean that_present_tblName = true && that.isSetTblName();
+    if (this_present_tblName || that_present_tblName) {
+      if (!(this_present_tblName && that_present_tblName))
+        return false;
+      if (!this.tblName.equals(that.tblName))
+        return false;
+    }
+
+    boolean this_present_partitionKeys = true && this.isSetPartitionKeys();
+    boolean that_present_partitionKeys = true && that.isSetPartitionKeys();
+    if (this_present_partitionKeys || that_present_partitionKeys) {
+      if (!(this_present_partitionKeys && that_present_partitionKeys))
+        return false;
+      if (!this.partitionKeys.equals(that.partitionKeys))
+        return false;
+    }
+
+    boolean this_present_applyDistinct = true && this.isSetApplyDistinct();
+    boolean that_present_applyDistinct = true && that.isSetApplyDistinct();
+    if (this_present_applyDistinct || that_present_applyDistinct) {
+      if (!(this_present_applyDistinct && that_present_applyDistinct))
+        return false;
+      if (this.applyDistinct != that.applyDistinct)
+        return false;
+    }
+
+    boolean this_present_filter = true && this.isSetFilter();
+    boolean that_present_filter = true && that.isSetFilter();
+    if (this_present_filter || that_present_filter) {
+      if (!(this_present_filter && that_present_filter))
+        return false;
+      if (!this.filter.equals(that.filter))
+        return false;
+    }
+
+    boolean this_present_partitionOrder = true && this.isSetPartitionOrder();
+    boolean that_present_partitionOrder = true && that.isSetPartitionOrder();
+    if (this_present_partitionOrder || that_present_partitionOrder) {
+      if (!(this_present_partitionOrder && that_present_partitionOrder))
+        return false;
+      if (!this.partitionOrder.equals(that.partitionOrder))
+        return false;
+    }
+
+    boolean this_present_ascending = true && this.isSetAscending();
+    boolean that_present_ascending = true && that.isSetAscending();
+    if (this_present_ascending || that_present_ascending) {
+      if (!(this_present_ascending && that_present_ascending))
+        return false;
+      if (this.ascending != that.ascending)
+        return false;
+    }
+
+    boolean this_present_maxParts = true && this.isSetMaxParts();
+    boolean that_present_maxParts = true && that.isSetMaxParts();
+    if (this_present_maxParts || that_present_maxParts) {
+      if (!(this_present_maxParts && that_present_maxParts))
+        return false;
+      if (this.maxParts != that.maxParts)
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tblName = true && (isSetTblName());
+    list.add(present_tblName);
+    if (present_tblName)
+      list.add(tblName);
+
+    boolean present_partitionKeys = true && (isSetPartitionKeys());
+    list.add(present_partitionKeys);
+    if (present_partitionKeys)
+      list.add(partitionKeys);
+
+    boolean present_applyDistinct = true && (isSetApplyDistinct());
+    list.add(present_applyDistinct);
+    if (present_applyDistinct)
+      list.add(applyDistinct);
+
+    boolean present_filter = true && (isSetFilter());
+    list.add(present_filter);
+    if (present_filter)
+      list.add(filter);
+
+    boolean present_partitionOrder = true && (isSetPartitionOrder());
+    list.add(present_partitionOrder);
+    if (present_partitionOrder)
+      list.add(partitionOrder);
+
+    boolean present_ascending = true && (isSetAscending());
+    list.add(present_ascending);
+    if (present_ascending)
+      list.add(ascending);
+
+    boolean present_maxParts = true && (isSetMaxParts());
+    list.add(present_maxParts);
+    if (present_maxParts)
+      list.add(maxParts);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionValuesRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartitionKeys()).compareTo(other.isSetPartitionKeys());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitionKeys()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionKeys, other.partitionKeys);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetApplyDistinct()).compareTo(other.isSetApplyDistinct());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetApplyDistinct()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.applyDistinct, other.applyDistinct);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFilter()).compareTo(other.isSetFilter());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFilter()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filter, other.filter);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartitionOrder()).compareTo(other.isSetPartitionOrder());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitionOrder()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionOrder, other.partitionOrder);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAscending()).compareTo(other.isSetAscending());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAscending()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ascending, other.ascending);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMaxParts()).compareTo(other.isSetMaxParts());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMaxParts()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxParts, other.maxParts);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionValuesRequest(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tblName:");
+    if (this.tblName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tblName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("partitionKeys:");
+    if (this.partitionKeys == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partitionKeys);
+    }
+    first = false;
+    if (isSetApplyDistinct()) {
+      if (!first) sb.append(", ");
+      sb.append("applyDistinct:");
+      sb.append(this.applyDistinct);
+      first = false;
+    }
+    if (isSetFilter()) {
+      if (!first) sb.append(", ");
+      sb.append("filter:");
+      if (this.filter == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.filter);
+      }
+      first = false;
+    }
+    if (isSetPartitionOrder()) {
+      if (!first) sb.append(", ");
+      sb.append("partitionOrder:");
+      if (this.partitionOrder == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partitionOrder);
+      }
+      first = false;
+    }
+    if (isSetAscending()) {
+      if (!first) sb.append(", ");
+      sb.append("ascending:");
+      sb.append(this.ascending);
+      first = false;
+    }
+    if (isSetMaxParts()) {
+      if (!first) sb.append(", ");
+      sb.append("maxParts:");
+      sb.append(this.maxParts);
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTblName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPartitionKeys()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partitionKeys' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization does not invoke the default constructor, so the primitive-field bitfield must be reset here explicitly.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionValuesRequestStandardSchemeFactory implements SchemeFactory {
+    public PartitionValuesRequestStandardScheme getScheme() {
+      return new PartitionValuesRequestStandardScheme();
+    }
+  }
+
+  private static class PartitionValuesRequestStandardScheme extends StandardScheme<PartitionValuesRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tblName = iprot.readString();
+              struct.setTblNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PARTITION_KEYS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list514 = iprot.readListBegin();
+                struct.partitionKeys = new ArrayList<FieldSchema>(_list514.size);
+                FieldSchema _elem515;
+                for (int _i516 = 0; _i516 < _list514.size; ++_i516)
+                {
+                  _elem515 = new FieldSchema();
+                  _elem515.read(iprot);
+                  struct.partitionKeys.add(_elem515);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionKeysIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // APPLY_DISTINCT
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.applyDistinct = iprot.readBool();
+              struct.setApplyDistinctIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // FILTER
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.filter = iprot.readString();
+              struct.setFilterIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // PARTITION_ORDER
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list517 = iprot.readListBegin();
+                struct.partitionOrder = new ArrayList<FieldSchema>(_list517.size);
+                FieldSchema _elem518;
+                for (int _i519 = 0; _i519 < _list517.size; ++_i519)
+                {
+                  _elem518 = new FieldSchema();
+                  _elem518.read(iprot);
+                  struct.partitionOrder.add(_elem518);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionOrderIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // ASCENDING
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.ascending = iprot.readBool();
+              struct.setAscendingIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // MAX_PARTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.maxParts = iprot.readI64();
+              struct.setMaxPartsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblName != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tblName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.partitionKeys != null) {
+        oprot.writeFieldBegin(PARTITION_KEYS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionKeys.size()));
+          for (FieldSchema _iter520 : struct.partitionKeys)
+          {
+            _iter520.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetApplyDistinct()) {
+        oprot.writeFieldBegin(APPLY_DISTINCT_FIELD_DESC);
+        oprot.writeBool(struct.applyDistinct);
+        oprot.writeFieldEnd();
+      }
+      if (struct.filter != null) {
+        if (struct.isSetFilter()) {
+          oprot.writeFieldBegin(FILTER_FIELD_DESC);
+          oprot.writeString(struct.filter);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.partitionOrder != null) {
+        if (struct.isSetPartitionOrder()) {
+          oprot.writeFieldBegin(PARTITION_ORDER_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionOrder.size()));
+            for (FieldSchema _iter521 : struct.partitionOrder)
+            {
+              _iter521.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetAscending()) {
+        oprot.writeFieldBegin(ASCENDING_FIELD_DESC);
+        oprot.writeBool(struct.ascending);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetMaxParts()) {
+        oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC);
+        oprot.writeI64(struct.maxParts);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionValuesRequestTupleSchemeFactory implements SchemeFactory {
+    public PartitionValuesRequestTupleScheme getScheme() {
+      return new PartitionValuesRequestTupleScheme();
+    }
+  }
+
+  private static class PartitionValuesRequestTupleScheme extends TupleScheme<PartitionValuesRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tblName);
+      {
+        oprot.writeI32(struct.partitionKeys.size());
+        for (FieldSchema _iter522 : struct.partitionKeys)
+        {
+          _iter522.write(oprot);
+        }
+      }
+      BitSet optionals = new BitSet();
+      if (struct.isSetApplyDistinct()) {
+        optionals.set(0);
+      }
+      if (struct.isSetFilter()) {
+        optionals.set(1);
+      }
+      if (struct.isSetPartitionOrder()) {
+        optionals.set(2);
+      }
+      if (struct.isSetAscending()) {
+        optionals.set(3);
+      }
+      if (struct.isSetMaxParts()) {
+        optionals.set(4);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(5);
+      }
+      oprot.writeBitSet(optionals, 6);
+      if (struct.isSetApplyDistinct()) {
+        oprot.writeBool(struct.applyDistinct);
+      }
+      if (struct.isSetFilter()) {
+        oprot.writeString(struct.filter);
+      }
+      if (struct.isSetPartitionOrder()) {
+        {
+          oprot.writeI32(struct.partitionOrder.size());
+          for (FieldSchema _iter523 : struct.partitionOrder)
+          {
+            _iter523.write(oprot);
+          }
+        }
+      }
+      if (struct.isSetAscending()) {
+        oprot.writeBool(struct.ascending);
+      }
+      if (struct.isSetMaxParts()) {
+        oprot.writeI64(struct.maxParts);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tblName = iprot.readString();
+      struct.setTblNameIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list524 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.partitionKeys = new ArrayList<FieldSchema>(_list524.size);
+        FieldSchema _elem525;
+        for (int _i526 = 0; _i526 < _list524.size; ++_i526)
+        {
+          _elem525 = new FieldSchema();
+          _elem525.read(iprot);
+          struct.partitionKeys.add(_elem525);
+        }
+      }
+      struct.setPartitionKeysIsSet(true);
+      BitSet incoming = iprot.readBitSet(6);
+      if (incoming.get(0)) {
+        struct.applyDistinct = iprot.readBool();
+        struct.setApplyDistinctIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.filter = iprot.readString();
+        struct.setFilterIsSet(true);
+      }
+      if (incoming.get(2)) {
+        {
+          org.apache.thrift.protocol.TList _list527 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.partitionOrder = new ArrayList<FieldSchema>(_list527.size);
+          FieldSchema _elem528;
+          for (int _i529 = 0; _i529 < _list527.size; ++_i529)
+          {
+            _elem528 = new FieldSchema();
+            _elem528.read(iprot);
+            struct.partitionOrder.add(_elem528);
+          }
+        }
+        struct.setPartitionOrderIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.ascending = iprot.readBool();
+        struct.setAscendingIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.maxParts = iprot.readI64();
+        struct.setMaxPartsIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
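
Editor's note: to make the shape of the generated PartitionValuesRequest struct
concrete, here is a minimal usage sketch (not part of the patch above). It
assumes the usual Thrift-generated constructor over the three required fields
and bean-style setters for the optional ones; the database, table, column, and
filter values are hypothetical.

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;

public class PartitionValuesRequestSketch {
  public static PartitionValuesRequest build() throws org.apache.thrift.TException {
    // Required fields (enforced by validate() above): dbName, tblName, partitionKeys.
    PartitionValuesRequest req = new PartitionValuesRequest(
        "default",                                              // dbName (hypothetical)
        "web_logs",                                             // tblName (hypothetical)
        Arrays.asList(new FieldSchema("ds", "string", null)));  // partition column (hypothetical)

    // Optional fields; each setter also flips the matching isSet* flag, which is
    // what the standard and tuple schemes above consult before writing the field.
    req.setApplyDistinct(true);           // de-duplicate the returned value rows
    req.setFilter("ds >= '2018-07-01'");  // partition filter expression (hypothetical)
    req.setAscending(false);              // sort direction for the returned values
    req.setMaxParts(100L);                // cap the number of rows returned

    req.validate();  // throws TProtocolException if a required field is unset
    return req;
  }
}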

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java
new file mode 100644
index 0000000..e336aa1
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionValuesResponse implements org.apache.thrift.TBase<PartitionValuesResponse, PartitionValuesResponse._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionValuesResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionValuesResponse");
+
+  private static final org.apache.thrift.protocol.TField PARTITION_VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionValues", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionValuesResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionValuesResponseTupleSchemeFactory());
+  }
+
+  private List<PartitionValuesRow> partitionValues; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PARTITION_VALUES((short)1, "partitionValues");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PARTITION_VALUES
+          return PARTITION_VALUES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PARTITION_VALUES, new org.apache.thrift.meta_data.FieldMetaData("partitionValues", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionValuesRow.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionValuesResponse.class, metaDataMap);
+  }
+
+  public PartitionValuesResponse() {
+  }
+
+  public PartitionValuesResponse(
+    List<PartitionValuesRow> partitionValues)
+  {
+    this();
+    this.partitionValues = partitionValues;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionValuesResponse(PartitionValuesResponse other) {
+    if (other.isSetPartitionValues()) {
+      List<PartitionValuesRow> __this__partitionValues = new ArrayList<PartitionValuesRow>(other.partitionValues.size());
+      for (PartitionValuesRow other_element : other.partitionValues) {
+        __this__partitionValues.add(new PartitionValuesRow(other_element));
+      }
+      this.partitionValues = __this__partitionValues;
+    }
+  }
+
+  public PartitionValuesResponse deepCopy() {
+    return new PartitionValuesResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.partitionValues = null;
+  }
+
+  public int getPartitionValuesSize() {
+    return (this.partitionValues == null) ? 0 : this.partitionValues.size();
+  }
+
+  public java.util.Iterator<PartitionValuesRow> getPartitionValuesIterator() {
+    return (this.partitionValues == null) ? null : this.partitionValues.iterator();
+  }
+
+  public void addToPartitionValues(PartitionValuesRow elem) {
+    if (this.partitionValues == null) {
+      this.partitionValues = new ArrayList<PartitionValuesRow>();
+    }
+    this.partitionValues.add(elem);
+  }
+
+  public List<PartitionValuesRow> getPartitionValues() {
+    return this.partitionValues;
+  }
+
+  public void setPartitionValues(List<PartitionValuesRow> partitionValues) {
+    this.partitionValues = partitionValues;
+  }
+
+  public void unsetPartitionValues() {
+    this.partitionValues = null;
+  }
+
+  /** Returns true if field partitionValues is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitionValues() {
+    return this.partitionValues != null;
+  }
+
+  public void setPartitionValuesIsSet(boolean value) {
+    if (!value) {
+      this.partitionValues = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PARTITION_VALUES:
+      if (value == null) {
+        unsetPartitionValues();
+      } else {
+        setPartitionValues((List<PartitionValuesRow>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PARTITION_VALUES:
+      return getPartitionValues();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PARTITION_VALUES:
+      return isSetPartitionValues();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionValuesResponse)
+      return this.equals((PartitionValuesResponse)that);
+    return false;
+  }
+
+  public boolean equals(PartitionValuesResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_partitionValues = true && this.isSetPartitionValues();
+    boolean that_present_partitionValues = true && that.isSetPartitionValues();
+    if (this_present_partitionValues || that_present_partitionValues) {
+      if (!(this_present_partitionValues && that_present_partitionValues))
+        return false;
+      if (!this.partitionValues.equals(that.partitionValues))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_partitionValues = true && (isSetPartitionValues());
+    list.add(present_partitionValues);
+    if (present_partitionValues)
+      list.add(partitionValues);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionValuesResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPartitionValues()).compareTo(other.isSetPartitionValues());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitionValues()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionValues, other.partitionValues);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionValuesResponse(");
+    boolean first = true;
+
+    sb.append("partitionValues:");
+    if (this.partitionValues == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partitionValues);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetPartitionValues()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partitionValues' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionValuesResponseStandardSchemeFactory implements SchemeFactory {
+    public PartitionValuesResponseStandardScheme getScheme() {
+      return new PartitionValuesResponseStandardScheme();
+    }
+  }
+
+  private static class PartitionValuesResponseStandardScheme extends StandardScheme<PartitionValuesResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PARTITION_VALUES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list538 = iprot.readListBegin();
+                struct.partitionValues = new ArrayList<PartitionValuesRow>(_list538.size);
+                PartitionValuesRow _elem539;
+                for (int _i540 = 0; _i540 < _list538.size; ++_i540)
+                {
+                  _elem539 = new PartitionValuesRow();
+                  _elem539.read(iprot);
+                  struct.partitionValues.add(_elem539);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionValuesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.partitionValues != null) {
+        oprot.writeFieldBegin(PARTITION_VALUES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionValues.size()));
+          for (PartitionValuesRow _iter541 : struct.partitionValues)
+          {
+            _iter541.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionValuesResponseTupleSchemeFactory implements SchemeFactory {
+    public PartitionValuesResponseTupleScheme getScheme() {
+      return new PartitionValuesResponseTupleScheme();
+    }
+  }
+
+  private static class PartitionValuesResponseTupleScheme extends TupleScheme<PartitionValuesResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.partitionValues.size());
+        for (PartitionValuesRow _iter542 : struct.partitionValues)
+        {
+          _iter542.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list543 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.partitionValues = new ArrayList<PartitionValuesRow>(_list543.size);
+        PartitionValuesRow _elem544;
+        for (int _i545 = 0; _i545 < _list543.size; ++_i545)
+        {
+          _elem544 = new PartitionValuesRow();
+          _elem544.read(iprot);
+          struct.partitionValues.add(_elem544);
+        }
+      }
+      struct.setPartitionValuesIsSet(true);
+    }
+  }
+
+}
+
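
Editor's note: a short consumption sketch (not part of the patch). The
listPartitionValues call matches the PartitionValuesRequest/PartitionValuesResponse
imports that appear in the IMetaStoreClient diff further down, but treat the exact
client signature as an assumption; PartitionValuesRow is the struct defined in the
next file below.

import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;

public class PartitionValuesResponseSketch {
  // Prints one line per returned row; each row holds the partition-column values
  // in the order of the request's partitionKeys list.
  static void printPartitionValues(IMetaStoreClient client, PartitionValuesRequest req)
      throws Exception {
    PartitionValuesResponse resp = client.listPartitionValues(req);  // assumed client method
    for (PartitionValuesRow row : resp.getPartitionValues()) {
      System.out.println(String.join("/", row.getRow()));
    }
  }
}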

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java
new file mode 100644
index 0000000..082c6c2
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionValuesRow.java
@@ -0,0 +1,438 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionValuesRow implements org.apache.thrift.TBase<PartitionValuesRow, PartitionValuesRow._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionValuesRow> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionValuesRow");
+
+  private static final org.apache.thrift.protocol.TField ROW_FIELD_DESC = new org.apache.thrift.protocol.TField("row", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionValuesRowStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionValuesRowTupleSchemeFactory());
+  }
+
+  private List<String> row; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    ROW((short)1, "row");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // ROW
+          return ROW;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.ROW, new org.apache.thrift.meta_data.FieldMetaData("row", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionValuesRow.class, metaDataMap);
+  }
+
+  public PartitionValuesRow() {
+  }
+
+  public PartitionValuesRow(
+    List<String> row)
+  {
+    this();
+    this.row = row;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionValuesRow(PartitionValuesRow other) {
+    if (other.isSetRow()) {
+      List<String> __this__row = new ArrayList<String>(other.row);
+      this.row = __this__row;
+    }
+  }
+
+  public PartitionValuesRow deepCopy() {
+    return new PartitionValuesRow(this);
+  }
+
+  @Override
+  public void clear() {
+    this.row = null;
+  }
+
+  public int getRowSize() {
+    return (this.row == null) ? 0 : this.row.size();
+  }
+
+  public java.util.Iterator<String> getRowIterator() {
+    return (this.row == null) ? null : this.row.iterator();
+  }
+
+  public void addToRow(String elem) {
+    if (this.row == null) {
+      this.row = new ArrayList<String>();
+    }
+    this.row.add(elem);
+  }
+
+  public List<String> getRow() {
+    return this.row;
+  }
+
+  public void setRow(List<String> row) {
+    this.row = row;
+  }
+
+  public void unsetRow() {
+    this.row = null;
+  }
+
+  /** Returns true if field row is set (has been assigned a value) and false otherwise */
+  public boolean isSetRow() {
+    return this.row != null;
+  }
+
+  public void setRowIsSet(boolean value) {
+    if (!value) {
+      this.row = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case ROW:
+      if (value == null) {
+        unsetRow();
+      } else {
+        setRow((List<String>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case ROW:
+      return getRow();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case ROW:
+      return isSetRow();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionValuesRow)
+      return this.equals((PartitionValuesRow)that);
+    return false;
+  }
+
+  public boolean equals(PartitionValuesRow that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_row = true && this.isSetRow();
+    boolean that_present_row = true && that.isSetRow();
+    if (this_present_row || that_present_row) {
+      if (!(this_present_row && that_present_row))
+        return false;
+      if (!this.row.equals(that.row))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_row = true && (isSetRow());
+    list.add(present_row);
+    if (present_row)
+      list.add(row);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionValuesRow other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetRow()).compareTo(other.isSetRow());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRow()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.row, other.row);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionValuesRow(");
+    boolean first = true;
+
+    sb.append("row:");
+    if (this.row == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.row);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetRow()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'row' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionValuesRowStandardSchemeFactory implements SchemeFactory {
+    public PartitionValuesRowStandardScheme getScheme() {
+      return new PartitionValuesRowStandardScheme();
+    }
+  }
+
+  private static class PartitionValuesRowStandardScheme extends StandardScheme<PartitionValuesRow> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionValuesRow struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // ROW
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list530 = iprot.readListBegin();
+                struct.row = new ArrayList<String>(_list530.size);
+                String _elem531;
+                for (int _i532 = 0; _i532 < _list530.size; ++_i532)
+                {
+                  _elem531 = iprot.readString();
+                  struct.row.add(_elem531);
+                }
+                iprot.readListEnd();
+              }
+              struct.setRowIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionValuesRow struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.row != null) {
+        oprot.writeFieldBegin(ROW_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.row.size()));
+          for (String _iter533 : struct.row)
+          {
+            oprot.writeString(_iter533);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionValuesRowTupleSchemeFactory implements SchemeFactory {
+    public PartitionValuesRowTupleScheme getScheme() {
+      return new PartitionValuesRowTupleScheme();
+    }
+  }
+
+  private static class PartitionValuesRowTupleScheme extends TupleScheme<PartitionValuesRow> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.row.size());
+        for (String _iter534 : struct.row)
+        {
+          oprot.writeString(_iter534);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionValuesRow struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list535 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.row = new ArrayList<String>(_list535.size);
+        String _elem536;
+        for (int _i537 = 0; _i537 < _list535.size; ++_i537)
+        {
+          _elem536 = iprot.readString();
+          struct.row.add(_elem536);
+        }
+      }
+      struct.setRowIsSet(true);
+    }
+  }
+
+}
+
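
Editor's note: all three structs above implement java.io.Serializable via private
writeObject/readObject hooks built on TCompactProtocol, and they can likewise be
round-tripped explicitly with Thrift's TSerializer/TDeserializer. A minimal sketch
with hypothetical values:

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class ThriftRoundTripSketch {
  public static void main(String[] args) throws Exception {
    PartitionValuesRow row = new PartitionValuesRow(Arrays.asList("2018-07-13", "US"));

    // Serialize with the same compact protocol the generated hooks above use.
    byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(row);

    // Deserialize into a fresh instance; the standard scheme's read() ends by
    // calling validate(), so the required 'row' field is checked on the way in.
    PartitionValuesRow copy = new PartitionValuesRow();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

    System.out.println(copy);  // PartitionValuesRow(row:[2018-07-13, US])
  }
}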


[70/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
index 0000000,b5d147b..27d96e5
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java
@@@ -1,0 -1,3699 +1,3740 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ 
+ import java.io.IOException;
+ import java.nio.ByteBuffer;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.annotation.NoReconnect;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.CheckConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;
+ import org.apache.hadoop.hive.metastore.api.CmRecycleResponse;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.CommitTxnRequest;
+ import org.apache.hadoop.hive.metastore.api.CompactionResponse;
+ import org.apache.hadoop.hive.metastore.api.CompactionType;
+ import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException;
+ import org.apache.hadoop.hive.metastore.api.CreationMetadata;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.DataOperationType;
+ import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.FindSchemasByColsResp;
+ import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;
+ import org.apache.hadoop.hive.metastore.api.FireEventRequest;
+ import org.apache.hadoop.hive.metastore.api.FireEventResponse;
+ import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;
+ import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
+ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
+ import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
+ import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
+ import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
+ import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.ISchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
+ import org.apache.hadoop.hive.metastore.api.LockRequest;
+ import org.apache.hadoop.hive.metastore.api.LockResponse;
+ import org.apache.hadoop.hive.metastore.api.Materialization;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
+ import org.apache.hadoop.hive.metastore.api.NoSuchLockException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;
+ import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
+ import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PartitionEventType;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesRequest;
+ import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
+ import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
+ import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.RuntimeStat;
+ import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersion;
+ import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
+ import org.apache.hadoop.hive.metastore.api.ShowCompactResponse;
+ import org.apache.hadoop.hive.metastore.api.ShowLocksRequest;
+ import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.TableMeta;
+ import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
+ import org.apache.hadoop.hive.metastore.api.TxnAbortedException;
+ import org.apache.hadoop.hive.metastore.api.TxnOpenException;
+ import org.apache.hadoop.hive.metastore.api.TxnToWriteId;
+ import org.apache.hadoop.hive.metastore.api.UniqueConstraintsRequest;
+ import org.apache.hadoop.hive.metastore.api.UnknownDBException;
+ import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
+ import org.apache.hadoop.hive.metastore.api.UnknownTableException;
+ import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMMapping;
+ import org.apache.hadoop.hive.metastore.api.WMNullablePool;
+ import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMPool;
+ import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
+ import org.apache.hadoop.hive.metastore.api.WMTrigger;
+ import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.api.WriteNotificationLogRequest;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+ import org.apache.thrift.TException;
+ 
+ /**
+  * Wrapper around the Hive metastore Thrift API.
+  */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public interface IMetaStoreClient {
+ 
+   /**
+    * Returns whether the current client is compatible with the given configuration.
+    * @param conf the configuration to check against
+    * @return true if the client is compatible with the configuration, false otherwise
+    */
+   boolean isCompatibleWith(Configuration conf);
+ 
+   /**
+    * Set the added-jars path info on the MetaStoreClient.
+    * @param addedJars the value of hive.added.jars.path: a comma-separated list of qualified paths
+    */
+   void setHiveAddedJars(String addedJars);
+ 
+   /**
+    * Returns true if the current client is using an in-process metastore (local metastore).
+    *
+    * @return true if the metastore runs in-process, false otherwise
+    */
+   boolean isLocalMetaStore();
+ 
+   /**
+    *  Tries to reconnect this MetaStoreClient to the MetaStore.
+    */
+   void reconnect() throws MetaException;
+ 
+   /**
+    * Close the connection to the metastore.
+    */
+   @NoReconnect
+   void close();
+ 
+   /**
+    * Set a metastore configuration variable that is open to end users.
+    */
+   void setMetaConf(String key, String value) throws MetaException, TException;
+ 
+   /**
+    * Get the current value of a metastore configuration variable.
+    */
+   String getMetaConf(String key) throws MetaException, TException;
+ 
+   /**
+    * Create a new catalog.
+    * @param catalog catalog object to create.
+    * @throws AlreadyExistsException A catalog of this name already exists.
+    * @throws InvalidObjectException There is something wrong with the passed in catalog object.
+    * @throws MetaException something went wrong, usually either in the database or while trying
+    * to create the directory for the catalog.
+    * @throws TException general thrift exception.
+    */
+   void createCatalog(Catalog catalog)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, TException;
+ 
+   /**
+    * Alter an existing catalog.
+    * @param catalogName the name of the catalog to alter.
+    * @param newCatalog the new catalog object.  All relevant details of the catalog should be
+    *                   set; don't rely on the system to figure out what you changed and copy
+    *                   over only that.
+    * @throws NoSuchObjectException no catalog of this name exists
+    * @throws InvalidObjectException an attempt was made to make an unsupported change (such as
+    * catalog name).
+    * @throws MetaException usually indicates a database error
+    * @throws TException general thrift exception
+    */
+   void alterCatalog(String catalogName, Catalog newCatalog)
+       throws NoSuchObjectException, InvalidObjectException, MetaException, TException;
+ 
+   /**
+    * Get a catalog object.
+    * @param catName Name of the catalog to fetch.
+    * @return The catalog.
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws TException general thrift exception.
+    */
+   Catalog getCatalog(String catName) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of all catalogs known to the system.
+    * @return list of catalog names
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws TException general thrift exception.
+    */
+   List<String> getCatalogs() throws MetaException, TException;
+ 
+   /**
+    * Drop a catalog.  Catalogs must be empty to be dropped, there is no cascade for dropping a
+    * catalog.
+    * @param catName name of the catalog to drop
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws InvalidOperationException The catalog is not empty and cannot be dropped.
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws TException general thrift exception.
+    */
+   void dropCatalog(String catName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
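
A minimal sketch of the catalog lifecycle these methods describe, assuming an
IMetaStoreClient instance `client` obtained elsewhere (e.g. from HiveMetaStoreClient);
the catalog name and location are illustrative:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Catalog;

    // Create a catalog with a name and root location (description is optional).
    Catalog cat = new Catalog("sales_cat", "/warehouse/sales_cat");
    cat.setDescription("catalog for sales data");
    client.createCatalog(cat);

    // Fetch it back, and list all catalogs known to the metastore.
    Catalog fetched = client.getCatalog("sales_cat");
    List<String> allCatalogs = client.getCatalogs();

    // A catalog must be empty before it can be dropped; there is no cascade.
    client.dropCatalog("sales_cat");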
+ 
+   /**
+    * Get the names of all databases in the default catalog that match the given pattern.
+    * @param databasePattern pattern for the database name to match
+    * @return List of database names.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getDatabases(String databasePattern) throws MetaException, TException;
+ 
+   /**
+    * Get all databases in a catalog whose names match a pattern.
+    * @param catName  catalog name.  Can be null, in which case the default catalog is assumed.
+    * @param databasePattern pattern for the database name to match
+    * @return list of database names
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getDatabases(String catName, String databasePattern)
+       throws MetaException, TException;
+ 
+   /**
+    * Get the names of all databases in the MetaStore.
+    * @return List of database names in the default catalog.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getAllDatabases() throws MetaException, TException;
+ 
+   /**
+    * Get all databases in a catalog.
+    * @param catName catalog name.  Can be null, in which case the default catalog is assumed.
+    * @return list of all database names
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> getAllDatabases(String catName) throws MetaException, TException;
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern.
+    * @param dbName database name.
+    * @param tablePattern pattern for table name to conform to
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException indicated database to search in does not exist.
+    */
+   List<String> getTables(String dbName, String tablePattern)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tablePattern pattern for table name to conform to
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException general thrift error
+    * @throws UnknownDBException indicated database to search in does not exist.
+    */
+   List<String> getTables(String catName, String dbName, String tablePattern)
+       throws MetaException, TException, UnknownDBException;
+ 
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW)
+    * @param dbName Name of the database to fetch tables in.
+    * @param tablePattern pattern to match for table names.
+    * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views.
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException indicated database does not exist.
+    */
+   List<String> getTables(String dbName, String tablePattern, TableType tableType)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database that satisfy the supplied
+    * table name pattern and table type (MANAGED_TABLE || EXTERNAL_TABLE || VIRTUAL_VIEW)
+    * @param catName catalog name.
+    * @param dbName Name of the database to fetch tables in.
+    * @param tablePattern pattern to match for table names.
+    * @param tableType Type of the table in the HMS store. VIRTUAL_VIEW is for views.
+    * @return List of table names.
+    * @throws MetaException error fetching information from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException indicated database does not exist.
+    */
+   List<String> getTables(String catName, String dbName, String tablePattern, TableType tableType)
+       throws MetaException, TException, UnknownDBException;
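
For illustration, a sketch of the table-type overload; the database and pattern names
are hypothetical, and patterns use '*' as a wildcard:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.TableType;

    // List all views in database "sales" whose names start with "daily_".
    List<String> views = client.getTables("sales", "daily_*", TableType.VIRTUAL_VIEW);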
+ 
+   /**
+    * Get materialized views that have rewriting enabled.  This will use the default catalog.
+    * @param dbName Name of the database to fetch materialized views from.
+    * @return List of materialized view names.
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException no such database
+    */
+   List<String> getMaterializedViewsForRewriting(String dbName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get materialized views that have rewriting enabled.
+    * @param catName catalog name.
+    * @param dbName Name of the database to fetch materialized views from.
+    * @return List of materialized view names.
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException no such database
+    */
+   List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Fetches just table name and comments.  Useful when you need full table name
+    * (catalog.database.table) but don't need extra information like partition columns that
+    * require additional fetches from the database.
+    * @param dbPatterns database pattern to match, or null for all databases
+    * @param tablePatterns table pattern to match.
+    * @param tableTypes list of table types to fetch.
+    * @return list of TableMeta objects with information on matching tables
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Fetches just table name and comments.  Useful when you need full table name
+    * (catalog.database.table) but don't need extra information like partition columns that
+    * require additional fetches from the database.
+    * @param catName catalog to search in.  Search cannot cross catalogs.
+    * @param dbPatterns database pattern to match, or null for all databases
+    * @param tablePatterns table pattern to match.
+    * @param tableTypes list of table types to fetch.
+    * @return list of TableMeta objects with information on matching tables
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<TableMeta> getTableMeta(String catName, String dbPatterns, String tablePatterns,
+                                List<String> tableTypes)
+       throws MetaException, TException, UnknownDBException;
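
A hedged sketch of the lightweight metadata fetch; patterns and table types below are
illustrative:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.TableMeta;

    // Cheap listing: names and comments only, across all databases matching "sales*".
    List<TableMeta> metas =
        client.getTableMeta("sales*", "*", Arrays.asList("MANAGED_TABLE", "EXTERNAL_TABLE"));
    for (TableMeta m : metas) {
      System.out.println(m.getDbName() + "." + m.getTableName());
    }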
+ 
+   /**
+    * Get the names of all tables in the specified database.
+    * @param dbName database name
+    * @return List of table names.
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<String> getAllTables(String dbName) throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get the names of all tables in the specified database.
+    * @param catName catalog name
+    * @param dbName database name
+    * @return List of table names.
+    * @throws MetaException something went wrong with the fetch from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException No databases match the provided pattern.
+    */
+   List<String> getAllTables(String catName, String dbName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get a list of table names that match a filter.
+    * The filter operators are LIKE, &lt;, &lt;=, &gt;, &gt;=, =, &lt;&gt;
+    *
+    * In the filter statement, values interpreted as strings must be enclosed in quotes,
+    * while values interpreted as integers should not be.  Strings and integers are the only
+    * supported value types.
+    *
+    * The currently supported key names in the filter are:
+    * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+    *   and supports all filter operators
+    * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+    *   and supports all filter operators except LIKE
+    * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+    *   and only supports the filter operators = and &lt;&gt;.
+    *   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+    *   For example, to filter on parameter keys called "retention", the key name in the filter
+    *   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+    *   Also, = and &lt;&gt; only work for keys that exist in the tables.
+    *   E.g., filtering on tables where key1 &lt;&gt; value will only
+    *   return tables that have a value for the parameter key1.
+    * Some example filter statements include:
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+    *   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and (" +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")"
+    *
+    * @param dbName
+    *          The name of the database from which you will retrieve the table names
+    * @param filter
+    *          The filter string
+    * @param maxTables
+    *          The maximum number of tables returned
+    * @return  A list of table names that match the desired filter
+    * @throws InvalidOperationException invalid filter
+    * @throws UnknownDBException no such database
+    * @throws TException thrift transport error
+    */
+   List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+       throws TException, InvalidOperationException, UnknownDBException;
+ 
+   /**
+    * Get a list of table names that match a filter.
+    * The filter operators are LIKE, &lt;, &lt;=, &gt;, &gt;=, =, &lt;&gt;
+    *
+    * In the filter statement, values interpreted as strings must be enclosed in quotes,
+    * while values interpreted as integers should not be.  Strings and integers are the only
+    * supported value types.
+    *
+    * The currently supported key names in the filter are:
+    * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+    *   and supports all filter operators
+    * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+    *   and supports all filter operators except LIKE
+    * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+    *   and only supports the filter operators = and &lt;&gt;.
+    *   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+    *   For example, to filter on parameter keys called "retention", the key name in the filter
+    *   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+    *   Also, = and &lt;&gt; only work for keys that exist in the tables.
+    *   E.g., filtering on tables where key1 &lt;&gt; value will only
+    *   return tables that have a value for the parameter key1.
+    * Some example filter statements include:
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+    *   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+    * filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and (" +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+    *   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")"
+    *
+    * @param catName catalog name
+    * @param dbName
+    *          The name of the database from which you will retrieve the table names
+    * @param filter
+    *          The filter string
+    * @param maxTables
+    *          The maximum number of tables returned
+    * @return  A list of table names that match the desired filter
+    * @throws InvalidOperationException invalid filter
+    * @throws UnknownDBException no such database
+    * @throws TException thrift transport error
+    */
+   List<String> listTableNamesByFilter(String catName, String dbName, String filter, int maxTables)
+       throws TException, InvalidOperationException, UnknownDBException;
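
A sketch of building such a filter string, assuming the filter-field constants live in
hive_metastoreConstants; the catalog, database, owner, and parameter values are
illustrative:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;

    // Tables owned by test_user whose "retention" parameter is 30 or 90.
    String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and ("
        + hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or "
        + hive_metastoreConstants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")";
    // -1 as maxTables: no limit (assumed).
    List<String> tables = client.listTableNamesByFilter("hive", "sales", filter, -1);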
+ 
+   /**
+    * Drop the table.
+    *
+    * @param dbname
+    *          The database for this table
+    * @param tableName
+    *          The table to drop
+    * @param deleteData
+    *          Should we delete the underlying data
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @throws MetaException
+    *           Could not drop table properly.
+    * @throws NoSuchObjectException
+    *           The table wasn't found.
+    * @throws TException
+    *           A thrift communication error occurred
+    *
+    */
+   void dropTable(String dbname, String tableName, boolean deleteData,
+       boolean ignoreUnknownTab) throws MetaException, TException,
+       NoSuchObjectException;
+ 
+   /**
+    * Drop the table.
+    *
+    * @param dbname
+    *          The database for this table
+    * @param tableName
+    *          The table to drop
+    * @param deleteData
+    *          Should we delete the underlying data
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @param ifPurge
+    *          completely purge the table (skipping trash) while removing data from warehouse
+    * @throws MetaException
+    *           Could not drop table properly.
+    * @throws NoSuchObjectException
+    *           The table wasn't found.
+    * @throws TException
+    *           A thrift communication error occurred
+    */
+   void dropTable(String dbname, String tableName, boolean deleteData,
+       boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException,
+       NoSuchObjectException;
+ 
+   /**
+    * Drop the table.
+    *
+    * @param dbname
+    *          The database for this table
+    * @param tableName
+    *          The table to drop
+    * @throws MetaException
+    *           Could not drop table properly.
+    * @throws NoSuchObjectException
+    *           The table wasn't found.
+    * @throws TException
+    *           A thrift communication error occurred
+    */
+   void dropTable(String dbname, String tableName)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Drop a table.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @param deleteData whether associated data should be deleted.
+    * @param ignoreUnknownTable whether a non-existent table name should be ignored
+    * @param ifPurge whether dropped data should be immediately removed rather than placed in HDFS
+    *               trash.
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    * @throws NoSuchObjectException No table of this name exists, only thrown if
+    * ignoreUnknownTable is false.
+    * @throws TException general thrift error.
+    */
+   void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                  boolean ignoreUnknownTable, boolean ifPurge)
+     throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Drop a table.  Equivalent to
+    * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with ifPurge set to
+    * false.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @param deleteData whether associated data should be deleted.
+    * @param ignoreUnknownTable whether a non-existent table name should be ignored
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    * @throws NoSuchObjectException No table of this name exists, only thrown if
+    * ignoreUnknownTable is false.
+    * @throws TException general thrift error.
+    */
+   default void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                          boolean ignoreUnknownTable)
+     throws MetaException, NoSuchObjectException, TException {
+     dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, false);
+   }
+ 
+   /**
+    * Drop a table.  Equivalent to
+    * {@link #dropTable(String, String, String, boolean, boolean, boolean)} with deleteData
+    * set and ignoreUnknownTable set to true and ifPurge set to false.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    * @throws NoSuchObjectException No table of this name exists, only thrown if
+    * ignoreUnknownTable is false.
+    * @throws TException general thrift error.
+    */
+   default void dropTable(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, TException {
+     dropTable(catName, dbName, tableName, true, true, false);
+   }
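
The delegation above means these three calls are equivalent (catalog, database, and
table names are illustrative):

    // The default methods fill in deleteData=true, ignoreUnknownTable=true, ifPurge=false.
    client.dropTable("hive", "sales", "tmp_orders");
    client.dropTable("hive", "sales", "tmp_orders", true, true);
    client.dropTable("hive", "sales", "tmp_orders", true, true, false);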
+ 
+   /**
+    * Truncate the table/partitions in the default catalog.
+    * @param dbName
+    *          The database the table to be truncated belongs to
+    * @param tableName
+    *          The table to truncate
+    * @param partNames
+    *          List of partitions to truncate. NULL will truncate the whole table/all partitions
+    * @throws MetaException Failure in the RDBMS or storage
+    * @throws TException Thrift transport exception
+    */
+   void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException;
+ 
+   /**
+    * Truncate the table/partitions in the given catalog.
+    * @param catName catalog name
+    * @param dbName
+    *          The database the table to be truncated belongs to
+    * @param tableName
+    *          The table to truncate
+    * @param partNames
+    *          List of partitions to truncate. NULL will truncate the whole table/all partitions
+    * @throws MetaException Failure in the RDBMS or storage
+    * @throws TException Thrift transport exception
+    */
+   void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
+       throws MetaException, TException;
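
A short sketch of both truncation modes (database, table, and partition names are
illustrative):

    import java.util.Arrays;

    // Truncate two specific partitions of sales.orders ...
    client.truncateTable("sales", "orders", Arrays.asList("ds=2018-07-01", "ds=2018-07-02"));
    // ... or pass null to truncate the whole table / all partitions.
    client.truncateTable("sales", "orders", null);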
+ 
+   /**
+    * Recycles the files recursively from the input path to the cmroot directory, either by
+    * copying or moving them.
+    *
+    * @param request carries the path of the data files to be recycled to cmroot, plus an
+    *                isPurge flag; when the flag is set to true, the recycled files are not
+    *                moved to Trash
+    * @return Response, which is currently empty
+    */
+   CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException;
+ 
+   /**
+    * Check whether a table exists in the default catalog.
+    * @param databaseName database name
+    * @param tableName table name
+    * @return true if the indicated table exists, false if not
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException the indicated database does not exist.
+    */
+   boolean tableExists(String databaseName, String tableName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Check whether a table exists.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @return true if the indicated table exists, false if not
+    * @throws MetaException error fetching from the RDBMS
+    * @throws TException thrift transport error
+    * @throws UnknownDBException the indicated database does not exist.
+    */
+   boolean tableExists(String catName, String dbName, String tableName)
+       throws MetaException, TException, UnknownDBException;
+ 
+   /**
+    * Get a Database Object in the default catalog
+    * @param databaseName  name of the database to fetch
+    * @return the database
+    * @throws NoSuchObjectException The database does not exist
+    * @throws MetaException Could not fetch the database
+    * @throws TException A thrift communication error occurred
+    */
+   Database getDatabase(String databaseName)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a database.
+    * @param catalogName catalog name.  Can be null, in which case
+    * {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param databaseName database name
+    * @return the database object
+    * @throws NoSuchObjectException No database with this name exists in the specified catalog
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift error
+    */
+   Database getDatabase(String catalogName, String databaseName)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a table object in the default catalog.
+    *
+    * @param dbName
+    *          The database the table is located in.
+    * @param tableName
+    *          Name of the table to fetch.
+    * @return An object representing the table.
+    * @throws MetaException
+    *           Could not fetch the table
+    * @throws TException
+    *           A thrift communication error occurred
+    * @throws NoSuchObjectException
+    *           In case the table wasn't found.
+    */
+   Table getTable(String dbName, String tableName) throws MetaException,
+       TException, NoSuchObjectException;
+ 
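++  /**
++   * Get a table object in the default catalog, validated against the caller's
++   * transaction snapshot.
++   * @param dbName The database the table is located in.
++   * @param tableName Name of the table to fetch.
++   * @param txnId transaction ID of the calling transaction (assumed).
++   * @param validWriteIdList valid write-ID list for the caller's snapshot (assumed).
++   */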
++  Table getTable(String dbName, String tableName,
++                 long txnId, String validWriteIdList)
++      throws MetaException, TException, NoSuchObjectException;
++
+   /**
+    * Get a table object.
+    * @param catName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @return table object.
+    * @throws MetaException Something went wrong, usually in the RDBMS.
+    * @throws TException general thrift error.
+    */
+   Table getTable(String catName, String dbName, String tableName) throws MetaException, TException;
+ 
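++  /**
++   * Get a table object, validated against the caller's transaction snapshot.
++   * @param catName catalog the table is in.
++   * @param dbName database the table is in.
++   * @param tableName table name.
++   * @param txnId transaction ID of the calling transaction (assumed).
++   * @param validWriteIdList valid write-ID list for the caller's snapshot (assumed).
++   * @return table object.
++   * @throws TException general thrift error.
++   */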
++  Table getTable(String catName, String dbName, String tableName,
++                        long txnId, String validWriteIdList) throws TException;
+ 
+   /**
+    * Get tables as objects (rather than just fetching their names).  This is more expensive and
+    * should only be used if you actually need all the information about the tables.
+    * @param dbName
+    *          The database the tables are located in.
+    * @param tableNames
+    *          The names of the tables to fetch
+    * @return A list of objects representing the tables.
+    *          Only the tables that can be retrieved from the database are returned.  For example,
+    *          if none of the requested tables could be retrieved, an empty list is returned.
+    *          There is no guarantee of ordering of the returned tables.
+    * @throws InvalidOperationException
+    *          The input to this operation is invalid (e.g., the list of table names is null)
+    * @throws UnknownDBException
+    *          The requested database could not be fetched.
+    * @throws TException
+    *          A thrift communication error occurred
+    * @throws MetaException
+    *          Any other errors
+    */
+   List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException;
+ 
+   /**
+    * Get tables as objects (rather than just fetching their names).  This is more expensive and
+    * should only be used if you actually need all the information about the tables.
+    * @param catName catalog name
+    * @param dbName
+    *          The database the tables are located in.
+    * @param tableNames
+    *          The names of the tables to fetch
+    * @return A list of objects representing the tables.
+    *          Only the tables that can be retrieved from the database are returned.  For example,
+    *          if none of the requested tables could be retrieved, an empty list is returned.
+    *          There is no guarantee of ordering of the returned tables.
+    * @throws InvalidOperationException
+    *          The input to this operation is invalid (e.g., the list of table names is null)
+    * @throws UnknownDBException
+    *          The requested database could not be fetched.
+    * @throws TException
+    *          A thrift communication error occurred
+    * @throws MetaException
+    *          Any other errors
+    */
+   List<Table> getTableObjectsByName(String catName, String dbName, List<String> tableNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException;
+ 
+   /**
+    * Returns the invalidation information for the materialized views given as input.
+    */
+   Map<String, Materialization> getMaterializationsInvalidationInfo(String dbName, List<String> viewNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException;
+ 
+   /**
+    * Updates the creation metadata for the materialized view.
+    */
+   void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, TException;
+ 
+   /**
+    * Updates the creation metadata for the materialized view.
+    */
+   void updateCreationMetadata(String catName, String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition values set.
+    * @param dbName database name
+    * @param tableName table name
+    * @param partVals partition values
+    * @return the partition object
+    * @throws InvalidObjectException no such table
+    * @throws AlreadyExistsException a partition with these values already exists
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String dbName, String tableName, List<String> partVals)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition values set.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partVals partition values
+    * @return the partition object
+    * @throws InvalidObjectException no such table
+    * @throws AlreadyExistsException a partition with these values already exists
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String catName, String dbName, String tableName, List<String> partVals)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition value set.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param name name of the partition, should be in the form partkey=partval.
+    * @return new partition object.
+    * @throws InvalidObjectException No such table.
+    * @throws AlreadyExistsException Partition of this name already exists.
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String dbName, String tableName, String name)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partition to a table and get back the resulting Partition object.  This creates an
+    * empty default partition with just the partition value set.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param name name of the partition, should be in the form partkey=partval.
+    * @return new partition object.
+    * @throws InvalidObjectException No such table.
+    * @throws AlreadyExistsException Partition of this name already exists.
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   Partition appendPartition(String catName, String dbName, String tableName, String name)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
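
The two appendPartition styles side by side, assuming a table partitioned by (ds, hr);
multi-key partition names join partkey=partval pairs with '/':

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.Partition;

    // Values must be given in partition-key order.
    Partition byVals = client.appendPartition("sales", "orders",
        Arrays.asList("2018-07-01", "12"));
    // Equivalent name-based form (this one would collide with the partition
    // created above and throw AlreadyExistsException).
    Partition byName = client.appendPartition("sales", "orders", "ds=2018-07-01/hr=12");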
+ 
+   /**
+    * Add a partition to the table.
+    *
+    * @param partition
+    *          The partition to add
+    * @return The partition added
+    * @throws InvalidObjectException
+    *           Could not find table to add to
+    * @throws AlreadyExistsException
+    *           Partition already exists
+    * @throws MetaException
+    *           Could not add partition
+    * @throws TException
+    *           Thrift exception
+    */
+   Partition add_partition(Partition partition)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add partitions to the table.
+    *
+    * @param partitions
+    *          The partitions to add
+    * @throws InvalidObjectException
+    *           Could not find table to add to
+    * @throws AlreadyExistsException
+    *           Partition already exists
+    * @throws MetaException
+    *           Could not add partition
+    * @throws TException
+    *           Thrift exception
+    * @return the number of partitions that were added
+    */
+   int add_partitions(List<Partition> partitions)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add a partitions using a spec proxy.
+    * @param partitionSpec partition spec proxy
+    * @return number of partitions that were added
+    * @throws InvalidObjectException the partitionSpec is malformed.
+    * @throws AlreadyExistsException one or more of the partitions already exist.
+    * @throws MetaException error accessing the RDBMS or storage.
+    * @throws TException thrift transport error
+    */
+   int add_partitions_pspec(PartitionSpecProxy partitionSpec)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
+   /**
+    * Add partitions to the table.
+    *
+    * @param partitions The partitions to add
+    * @param ifNotExists only add partitions if they don't exist
+    * @param needResults Whether the results are needed
+    * @return the partitions that were added, or null if needResults is false
+    */
+   List<Partition> add_partitions(
+       List<Partition> partitions, boolean ifNotExists, boolean needResults)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
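
A hedged sketch of the batch-add variant, where `newParts` is a hypothetical pre-built
list of Partition objects for an existing table:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;

    // Add the batch, skipping any partitions that already exist, and get back
    // the ones that were actually created.
    List<Partition> created = client.add_partitions(newParts, true, true);
    // With needResults == false the call returns null, saving the result payload.
    client.add_partitions(newParts, true, false);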
+ 
+   /**
+    * Get a partition.
+    * @param dbName database name
+    * @param tblName table name
+    * @param partVals partition values for this partition, must be in the same order as the
+    *                 partition keys of the table.
+    * @return the partition object
+    * @throws NoSuchObjectException no such partition
+    * @throws MetaException error access the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String dbName, String tblName, List<String> partVals)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a partition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tblName table name
+    * @param partVals partition values for this partition, must be in the same order as the
+    *                 partition keys of the table.
+    * @return the partition object
+    * @throws NoSuchObjectException no such partition
+    * @throws MetaException error access the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String catName, String dbName, String tblName, List<String> partVals)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Move a partition from one table to another
+    * @param partitionSpecs key value pairs that describe the partition to be moved.
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @return partition object
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    */
+   Partition exchange_partition(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destdb,
+       String destTableName) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, TException;
+ 
+   /**
+    * Move a partition from one table to another
+    * @param partitionSpecs key value pairs that describe the partition to be moved.
+    * @param sourceCat catalog of the source table
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destCat catalog of the destination table; for now this must be the same as sourceCat
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @return partition object
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    */
+   Partition exchange_partition(Map<String, String> partitionSpecs, String sourceCat,
+                                String sourceDb, String sourceTable, String destCat, String destdb,
+                                String destTableName) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, TException;
+ 
+   /**
+    * A single partitionSpecs may cause multiple partitions to be exchanged.
+    * E.g., with partition keys year=2015/month/day, exchanging the partition year=2015 results
+    * in all the partitions belonging to it being exchanged. This function returns the list of
+    * affected partitions.
+    * @param partitionSpecs key value pairs that describe the partition(s) to be moved.
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    * @return the list of the new partitions
+    */
+   List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destdb,
+       String destTableName) throws MetaException, NoSuchObjectException,
+       InvalidObjectException, TException;
+ 
+   /**
+    * A single partitionSpecs may cause multiple partitions to be exchanged.
+    * E.g., with partition keys year=2015/month/day, exchanging the partition year=2015 results
+    * in all the partitions belonging to it being exchanged. This function returns the list of
+    * affected partitions.
+    * @param partitionSpecs key value pairs that describe the partition(s) to be moved.
+    * @param sourceCat catalog of the source table
+    * @param sourceDb database of the source table
+    * @param sourceTable name of the source table
+    * @param destCat catalog of the destination table; for now this must be the same as sourceCat
+    * @param destdb database of the destination table
+    * @param destTableName name of the destination table
+    * @throws MetaException error accessing the RDBMS or storage
+    * @throws NoSuchObjectException no such table, for either source or destination table
+    * @throws InvalidObjectException error in partition specifications
+    * @throws TException thrift transport error
+    * @return the list of the new partitions
+    */
+   List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
+                                       String sourceDb, String sourceTable, String destCat,
+                                       String destdb, String destTableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, TException;
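
A sketch of the multi-partition exchange the paragraph above describes, using the
catalog-free variant; database and table names are hypothetical:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;
    import org.apache.hadoop.hive.metastore.api.Partition;

    // Move every partition under year=2015 (all its month/day subpartitions)
    // from staging.sales_tmp to prod.sales in one call.
    Map<String, String> partitionSpecs = new HashMap<>();
    partitionSpecs.put("year", "2015");
    List<Partition> moved =
        client.exchange_partitions(partitionSpecs, "staging", "sales_tmp", "prod", "sales");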
+ 
+   /**
+    * Get a Partition by name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param name - partition name, e.g. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
+    * @return the partition object
+    * @throws MetaException error access the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String dbName, String tblName, String name)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a Partition by name.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param name - partition name, e.g. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01'
+    * @return the partition object
+    * @throws MetaException error access the RDBMS.
+    * @throws TException thrift transport error
+    */
+   Partition getPartition(String catName, String dbName, String tblName, String name)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+ 
+   /**
+    * Get a Partition along with authorization information.
+    * @param dbName database name
+    * @param tableName table name
+    * @param pvals partition values, must be in the same order as the tables partition keys
+    * @param userName name of the calling user
+    * @param groupNames groups the calling user belongs to
+    * @return the partition
+    * @throws MetaException error accessing the RDBMS
+    * @throws UnknownTableException no such table
+    * @throws NoSuchObjectException no such partition
+    * @throws TException thrift transport error
+    */
+   Partition getPartitionWithAuthInfo(String dbName, String tableName,
+       List<String> pvals, String userName, List<String> groupNames)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a Partition along with authorization information.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param pvals partition values, must be in the same order as the tables partition keys
+    * @param userName name of the calling user
+    * @param groupNames groups the calling user belongs to
+    * @return the partition
+    * @throws MetaException error accessing the RDBMS
+    * @throws UnknownTableException no such table
+    * @throws NoSuchObjectException no such partition
+    * @throws TException thrift transport error
+    */
+   Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName,
+                                      List<String> pvals, String userName, List<String> groupNames)
+       throws MetaException, UnknownTableException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a list of partitions for a table.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param max_parts maximum number of parts to return, -1 for all
+    * @return the list of partitions
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partitions for a table.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param max_parts maximum number of parts to return, -1 for all
+    * @return the list of partitions
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitions(String catName, String db_name, String tbl_name, int max_parts)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partitions from a table, returned in the form of PartitionSpecProxy
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param maxParts maximum number of partitions to return, or -1 for all
+    * @return a PartitionSpecProxy
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts)
+     throws TException;
+ 
+   /**
+    * Get a list of partitions from a table, returned in the form of PartitionSpecProxy
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param maxParts maximum number of partitions to return, or -1 for all
+    * @return a PartitionSpecProxy
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName,
+                                         int maxParts) throws TException;
+ 
+   /**
+    * Get a list of partitions based on a (possibly partial) list of partition values.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partition values, in order of the table partition keys.  These can be
+    *                  partial, or .* to match all values for a particular key.
+    * @param max_parts maximum number of partitions to return, or -1 for all.
+    * @return list of partitions
+    * @throws NoSuchObjectException no such table.
+    * @throws MetaException error accessing the database or processing the partition values.
+    * @throws TException thrift transport error.
+    */
+   List<Partition> listPartitions(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partitions based on a (possibly partial) list of partition values.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partition values, in order of the table partition keys.  These can be
+    *                  partial, or .* to match all values for a particular key.
+    * @param max_parts maximum number of partitions to return, or -1 for all.
+    * @return list of partitions
+    * @throws NoSuchObjectException no such table.
+    * @throws MetaException error accessing the database or processing the partition values.
+    * @throws TException thrift transport error.
+    */
+   List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+                                  List<String> part_vals, int max_parts)
+       throws NoSuchObjectException, MetaException, TException;
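
A sketch of the partial-value lookup for a table assumed to be partitioned by
(year, month, day), using ".*" to accept any month; names are illustrative:

    import java.util.Arrays;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;

    // All partitions under year=2015, any month, any day.
    List<Partition> parts = client.listPartitions("sales", "orders",
        Arrays.asList("2015", ".*"), (short) -1);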
+ 
+   /**
+    * List Names of partitions in a table.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param max_parts maximum number of parts to fetch, or -1 to fetch them all.
+    * @return list of partition names.
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException Error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> listPartitionNames(String db_name, String tbl_name,
+       short max_parts) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * List Names of partitions in a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param max_parts maximum number of parts to fetch, or -1 to fetch them all.
+    * @return list of partition names.
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException Error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+                                   int max_parts) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get a list of partition names matching a partial specification of the partition values.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partial list of partition values.  These must be given in the order of the
+    *                  partition keys.  If you wish to accept any value for a particular key you
+    *                  can pass ".*" for that value in this list.
+    * @param max_parts maximum number of partition names to return, or -1 to return all that are
+    *                  found.
+    * @return list of matching partition names.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error.
+    * @throws NoSuchObjectException no such table.
+    */
+   List<String> listPartitionNames(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get a list of partition names matching a partial specification of the partition values.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partial list of partition values.  These must be given in the order of the
+    *                  partition keys.  If you wish to accept any value for a particular key you
+    *                  can pass ".*" for that value in this list.
+    * @param max_parts maximum number of partition names to return, or -1 to return all that are
+    *                  found.
+    * @return list of matching partition names.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error.
+    * @throws NoSuchObjectException no such table.
+    */
+   List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+                                   List<String> part_vals, int max_parts)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get a list of partition values
+    * @param request request
+    * @return response containing the partition values
+    * @throws MetaException error accessing RDBMS
+    * @throws TException thrift transport error
+    * @throws NoSuchObjectException no such table
+    */
+   PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * Get number of partitions matching specified filter
+    * @param dbName the database name
+    * @param tableName the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @return number of partitions
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException no such table
+    * @throws TException thrift transport error
+    */
+   int getNumPartitionsByFilter(String dbName, String tableName,
+                                String filter) throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get number of partitions matching specified filter
+    * @param catName catalog name
+    * @param dbName the database name
+    * @param tableName the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @return number of partitions
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException no such table
+    * @throws TException thrift transport error
+    */
+   int getNumPartitionsByFilter(String catName, String dbName, String tableName,
+                                String filter) throws MetaException, NoSuchObjectException, TException;
+ 
+ 
+   /**
+    * Get list of partitions matching specified filter
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @return list of partitions
+    * @throws MetaException Error accessing the RDBMS or processing the filter.
+    * @throws NoSuchObjectException No such table.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+       String filter, short max_parts) throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get list of partitions matching specified filter
+    * @param catName catalog name.
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @return list of partitions
+    * @throws MetaException Error accessing the RDBMS or processing the filter.
+    * @throws NoSuchObjectException No such table.
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsByFilter(String catName, String db_name, String tbl_name,
+                                          String filter, int max_parts)
+       throws MetaException, NoSuchObjectException, TException;
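
A sketch of a partition filter; remember that filtering is supported only on string
partition keys, and the names below are illustrative:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.Partition;

    // All partitions of sales.orders from July 2018 onward in region "emea".
    String filter = "ds >= \"2018-07-01\" and region = \"emea\"";
    List<Partition> parts =
        client.listPartitionsByFilter("sales", "orders", filter, (short) -1);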
+ 
+   /**
+    * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to
+    * fetch.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param filter SQL where clause filter
+    * @param max_parts maximum number of partitions to fetch, or -1 for all
+    * @return PartitionSpec
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException No table matches the request
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                 String filter, int max_parts)
+       throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get a list of partitions in a PartitionSpec, using a filter to select which partitions to
+    * fetch.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param filter SQL where clause filter
+    * @param max_parts maximum number of partitions to fetch, or -1 for all
+    * @return PartitionSpec
+    * @throws MetaException error accessing RDBMS or processing the filter
+    * @throws NoSuchObjectException No table matches the request
+    * @throws TException thrift transport error
+    */
+   PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name, String tbl_name,
+                                                 String filter, int max_parts)
+       throws MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Get list of partitions matching specified serialized expression
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param expr expression, serialized from ExprNodeDesc
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @param default_partition_name Default partition name from configuration. If blank, the
+    *    metastore server-side configuration is used.
+    * @param result the resulting list of partitions
+    * @return whether the resulting list may include partitions that could not be evaluated
+    *    against the expression on the server and so may or may not match it
+    * @throws TException thrift transport error or error executing the filter.
+    */
+   boolean listPartitionsByExpr(String db_name, String tbl_name,
+       byte[] expr, String default_partition_name, short max_parts, List<Partition> result)
+           throws TException;
+ 
+   /**
+    * Get list of partitions matching specified serialized expression
+    * @param catName catalog name
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param expr expression, serialized from ExprNodeDesc
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @param default_partition_name Default partition name from configuration. If blank, the
+    *    metastore server-side configuration is used.
+    * @param result the resulting list of partitions
+    * @return whether the resulting list may include partitions that could not be evaluated
+    *    against the expression on the server and so may or may not match it
+    * @throws TException thrift transport error or error executing the filter.
+    */
+   boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr,
+                                String default_partition_name, int max_parts, List<Partition> result)
+       throws TException;
+ 
+   /**
+    * List partitions, fetching the authorization information along with the partitions.
+    * @param dbName database name
+    * @param tableName table name
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privileges for
+    * @param groupNames groups to fetch privileges for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String dbName,
+       String tableName, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * List partitions, fetching the authorization information along with the partitions.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privileges for
+    * @param groupNames groups to fetch privileges for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                              int maxParts, String userName, List<String> groupNames)
+       throws MetaException, TException, NoSuchObjectException;
+ 
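A short usage sketch (user and group names are illustrative only):

    // List every partition of sales_db.orders along with the privileges that
    // etl_user and the two groups hold on each of them.
    List<Partition> parts = client.listPartitionsWithAuthInfo(
        "hive", "sales_db", "orders", -1, "etl_user",
        Arrays.asList("etl", "analysts"));
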
+   /**
+    * Get partitions by a list of partition names.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_names list of partition names
+    * @return list of Partition objects
+    * @throws NoSuchObjectException No such partitions
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+       List<String> part_names) throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Get partitions by a list of partition names.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_names list of partition names
+    * @return list of Partition objects
+    * @throws NoSuchObjectException No such partitions
+    * @throws MetaException error accessing the RDBMS.
+    * @throws TException thrift transport error
+    */
+   List<Partition> getPartitionsByNames(String catName, String db_name, String tbl_name,
+                                        List<String> part_names)
+       throws NoSuchObjectException, MetaException, TException;
+ 
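A usage sketch; partition names follow Hive's key=value[/key=value...] encoding:

    List<Partition> byName = client.getPartitionsByNames(
        "hive", "sales_db", "orders",
        Arrays.asList("ds=2018-07-01", "ds=2018-07-02"));
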
+   /**
+    * List partitions along with privilege information for a user or groups
+    * @param dbName database name
+    * @param tableName table name
+    * @param partialPvals partition values, can be partial
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privilege information for
+    * @param groupNames groups to fetch privilege information for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String dbName,
+       String tableName, List<String> partialPvals, short maxParts, String userName,
+       List<String> groupNames) throws MetaException, TException, NoSuchObjectException;
+ 
+   /**
+    * List partitions along with privilege information for a user or groups
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partialPvals partition values, can be partial
+    * @param maxParts maximum number of partitions to fetch, or -1 for all
+    * @param userName user to fetch privilege information for
+    * @param groupNames groups to fetch privilege information for
+    * @return the list of partitions
+    * @throws NoSuchObjectException no partitions matching the criteria were found
+    * @throws MetaException error accessing the RDBMS
+    * @throws TException thrift transport error
+    */
+   List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                              List<String> partialPvals, int maxParts, String userName,
+                                              List<String> groupNames)
+       throws MetaException, TException, NoSuchObjectException;
+ 
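A sketch of the partial-values form; assuming a table partitioned by (year, month),
passing only the year matches every month beneath it:

    // Partial values are given outermost key first; omitted trailing keys match all.
    List<Partition> y2018 = client.listPartitionsWithAuthInfo(
        "hive", "sales_db", "orders",
        Arrays.asList("2018"), -1, "etl_user", Arrays.asList("etl"));
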
+   /**
+    * Mark an event as having occurred on a partition.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param partKVs key value pairs that describe the partition
+    * @param eventType type of the event
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException in practice this is never thrown
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException the partition described by partKVs is invalid
+    */
+   void markPartitionForEvent(String db_name, String tbl_name, Map<String,String> partKVs,
+       PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
+   /**
+    * Mark an event as having occurred on a partition.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param partKVs key value pairs that describe the partition
+    * @param eventType type of the event
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException in practice this is never thrown
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException the partition described by partKVs is invalid
+    */
+   void markPartitionForEvent(String catName, String db_name, String tbl_name, Map<String,String> partKVs,
+                              PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
+   /**
+    * Determine whether a partition has been marked with a particular event type.
+    * @param db_name database name
+    * @param tbl_name table name.
+    * @param partKVs key value pairs that describe the partition.
+    * @param eventType event type
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException in practice this is never thrown
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException the partition described by partKVs is invalid
+    */
+   boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map<String,String> partKVs,
+       PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
+   /**
+    * Determine whether a partition has been marked with a particular event type.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name.
+    * @param partKVs key value pairs that describe the partition.
+    * @param eventType event type
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException in practice this is never thrown
+    * @throws TException thrift transport error
+    * @throws UnknownTableException no such table
+    * @throws UnknownDBException no such database
+    * @throws UnknownPartitionException no such partition
+    * @throws InvalidPartitionException the partition described by partKVs is invalid
+    */
+   boolean isPartitionMarkedForEvent(String catName, String db_name, String tbl_name, Map<String,String> partKVs,
+                                     PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException,
+       UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException;
+ 
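A sketch pairing the two event methods (the partition key/values are illustrative;
LOAD_DONE is the event type signalling that a partition's data load finished):

    Map<String, String> partKVs = new HashMap<>();
    partKVs.put("ds", "2018-07-01");
    client.markPartitionForEvent("hive", "sales_db", "orders", partKVs,
        PartitionEventType.LOAD_DONE);
    // Later, another client can poll for the same event:
    boolean loaded = client.isPartitionMarkedForEvent("hive", "sales_db", "orders",
        partKVs, PartitionEventType.LOAD_DONE);
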
+   /**
+    * Validate that the given partition values contain only characters legal in
+    * partition names.
+    * @param partVals partition values to validate
+    * @throws MetaException a value contains characters that are not allowed
+    * @throws TException thrift transport error
+    */
+   void validatePartitionNameCharacters(List<String> partVals) throws TException, MetaException;
+ 
+   /**
+    * Create a new table.
+    * @param tbl table object to create
+    * @throws AlreadyExistsException a table of this name already exists
+    * @throws InvalidObjectException the table object is malformed
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException the database the table refers to does not exist
+    * @throws TException thrift transport error
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+    */
+   void createTable(Table tbl) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException;
+ 
+   /**
+    * Alter a table
+    * @param databaseName database name
+    * @param tblName table name
+    * @param table new table object, which should be a complete representation of the
+    *             table, not just the things you want to change.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   void alter_table(String databaseName, String tblName, Table table)
+       throws InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Alter a table. Equivalent to
+    * {@link #alter_table(String, String, String, Table, EnvironmentContext)} with
+    * EnvironmentContext set to null.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param newTable new table object, which should be a complete representation of the
+    *                 table, not just the things you want to change.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   default void alter_table(String catName, String dbName, String tblName, Table newTable)
+       throws InvalidOperationException, MetaException, TException {
+     alter_table(catName, dbName, tblName, newTable, null);
+   }
+ 
+   /**
+    * Alter a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param newTable new table object, which should be a complete representation of the
+    *                 table, not just the things you want to change.
+    * @param envContext options for the alter.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   void alter_table(String catName, String dbName, String tblName, Table newTable,
+                   EnvironmentContext envContext)
+       throws InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * @deprecated Use {@link #alter_table_with_environmentContext(String, String, Table,
+    * EnvironmentContext)} instead, passing the cascade option in the EnvironmentContext
+    * via {@code StatsSetupConst.CASCADE}.
+    */
+   @Deprecated
+   void alter_table(String defaultDatabaseName, String tblName, Table table,
+       boolean cascade) throws InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Alter a table.
+    * @param databaseName database name
+    * @param tblName table name
+    * @param table new table object, which should be a complete representation of the
+    *              table, not just the things you want to change.
+    * @param environmentContext options for the alter.
+    * @throws InvalidOperationException something is wrong with the new table object or an
+    * operation was attempted that is not allowed (such as changing partition columns).
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift exception
+    */
+   void alter_table_with_environmentContext(String databaseName, String tblName, Table table,
+       EnvironmentContext environmentContext) throws InvalidOperationException, MetaException,
+       TException;
+ 
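Because the new table object must be complete, alters typically follow a
read-modify-write pattern; a sketch, assuming getTable is available on the same
client:

    Table t = client.getTable("sales_db", "orders");
    t.getParameters().put("comment", "nightly load verified");
    client.alter_table("sales_db", "orders", t);   // t is the full new state
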
+   /**
+    * Create a new database.
+    * @param db database object.  If the catalog name is null it will be assumed to be
+    *           {@link Warehouse#DEFAULT_CATALOG_NAME}.
+    * @throws InvalidObjectException There is something wrong with the database object.
+    * @throws AlreadyExistsException There is already a database of this name in the specified
+    * catalog.
+    * @throws MetaException something went wrong, usually in the RDBMS
+    * @throws TException general thrift error
+    */
+   void createDatabase(Database db)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException;
+ 
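A sketch, assuming the generated Database constructor taking (name, description,
locationUri, parameters); with no catalog set, Warehouse.DEFAULT_CATALOG_NAME is
assumed per the contract above:

    Database db = new Database("staging_db", "scratch area for ETL",
        "hdfs:///warehouse/staging_db.db", new HashMap<String, String>());
    client.createDatabase(db);
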
+   /**
+    * Drop a database.
+    * @param name name of the database to drop.
+    * @throws NoSuchObjectException No such database exists.
+    * @throws InvalidOperationException The database cannot be dropped because it is not empty.
+    * @throws MetaException something went wrong, usually either in the RDBMS or in storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String name)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Drop a database.
+    * @param name name of the database to drop.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database cannot be dropped because it is not empty.
+    * @throws MetaException something went wrong, usually either in the RDBMS or in storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Drop a database.
+    * @param name database name.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @param cascade whether to drop contained tables, etc.  If this is false and there are
+    *                objects still in the database the drop will fail.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Drop a database.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @param cascade whether to drop contained tables, etc.  If this is false and there are
+    *                objects still in the database the drop will fail.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   void dropDatabase(String catName, String dbName, boolean deleteData, boolean ignoreUnknownDb,
+                     boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException;
+ 
+   /**
+    * Drop a database.  Equivalent to
+    * {@link #dropDatabase(String, String, boolean, boolean, boolean)} with cascade = false.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @param deleteData whether to drop the underlying HDFS directory.
+    * @param ignoreUnknownDb whether to ignore an attempt to drop a non-existent database
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   default void dropDatabase(String catName, String dbName, boolean deleteData,
+                             boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(catName, dbName, deleteData, ignoreUnknownDb, false);
+   }
+ 
+   /**
+    * Drop a database.  Equivalent to
+    * {@link #dropDatabase(String, String, boolean, boolean, boolean)} with deleteData =
+    * true, ignoreUnknownDb = false, cascade = false.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog and
+    * ignoreUnknownDb is false.
+    * @throws InvalidOperationException The database contains objects and cascade is false.
+    * @throws MetaException something went wrong, usually either in the RDBMS or storage.
+    * @throws TException general thrift error.
+    */
+   default void dropDatabase(String catName, String dbName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(catName, dbName, true, false, false);
+   }
+ 
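Given the defaults above, the two calls below are equivalent; the two-argument
overload fills in deleteData = true, ignoreUnknownDb = false, cascade = false:

    client.dropDatabase("hive", "staging_db");
    client.dropDatabase("hive", "staging_db", true, false, false);
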
+ 
+   /**
+    * Alter a database.
+    * @param name database name.
+    * @param db new database object.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog.
+    * @throws MetaException something went wrong, usually in the RDBMS.
+    * @throws TException general thrift error.
+    */
+   void alterDatabase(String name, Database db)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Alter a database.
+    * @param catName Catalog name.  This can be null, in which case
+    *                {@link Warehouse#DEFAULT_CATALOG_NAME} will be assumed.
+    * @param dbName database name.
+    * @param newDb new database object.
+    * @throws NoSuchObjectException No database of this name exists in the specified catalog.
+    * @throws MetaException something went wrong, usually in the RDBMS.
+    * @throws TException general thrift error.
+    */
+   void alterDatabase(String catName, String dbName, Database newDb)
+       throws NoSuchObjectException, MetaException, TException;
+ 
+   /**
+    * Drop a partition.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_vals partition values, in the same order as the partition keys
+    * @param deleteData
+    *          delete the underlying data or just delete the partition in metadata
+    * @return true if the partition was successfully dropped
+    * @throws NoSuchObjectException partition does not exist
+    * @throws MetaException error accessing the RDBMS or the storage.
+    * @throws TException thrift transport error
+    */
+   boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+       MetaException, TException;
+ 
+   /**
+    * Drop a partition.
+    * @param catName catalog name.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @param part_vals partition values, in the same order as the partition keys
+    * @param deleteData
+    *          delete the underlying data or just delete the partition in metadata
+    * @return true if the partition was successfully dropped
+    * @throws NoSuchObjectException partition does not exist
+    * @throws MetaException error accessing the RDBMS or the storage.
+    * @throws TException thrift transport error
+    */
+   boolean dropPartition(String catName, String db_name, String tbl_name,
+                         List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+       MetaException, TException;
+ 
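A usage sketch; part_vals are ordered exactly like the table's partition keys:

    boolean dropped = client.dropPartition("hive", "sales_db", "orders",
        Arrays.asList("2018-07-01"),   // value for the single partition key ds
        true);                         // also delete the underlying data
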
+   /**
+    * Drop a partition with the option to purge the partition data directly,
+    * rather than to move data to trash.
+    * @param db_name Name of the database.
+    * @param tbl_name Name of the table.
+    * @param part_vals Specification of the partitions being dropped.


<TRUNCATED>

[44/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
new file mode 100644
index 0000000..3530f2b
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnRequest.java
@@ -0,0 +1,497 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AbortTxnRequest implements org.apache.thrift.TBase<AbortTxnRequest, AbortTxnRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AbortTxnRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AbortTxnRequest");
+
+  private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField REPL_POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("replPolicy", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AbortTxnRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AbortTxnRequestTupleSchemeFactory());
+  }
+
+  private long txnid; // required
+  private String replPolicy; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TXNID((short)1, "txnid"),
+    REPL_POLICY((short)2, "replPolicy");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TXNID
+          return TXNID;
+        case 2: // REPL_POLICY
+          return REPL_POLICY;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TXNID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.REPL_POLICY};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TXNID, new org.apache.thrift.meta_data.FieldMetaData("txnid", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.REPL_POLICY, new org.apache.thrift.meta_data.FieldMetaData("replPolicy", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AbortTxnRequest.class, metaDataMap);
+  }
+
+  public AbortTxnRequest() {
+  }
+
+  public AbortTxnRequest(
+    long txnid)
+  {
+    this();
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AbortTxnRequest(AbortTxnRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.txnid = other.txnid;
+    if (other.isSetReplPolicy()) {
+      this.replPolicy = other.replPolicy;
+    }
+  }
+
+  public AbortTxnRequest deepCopy() {
+    return new AbortTxnRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setTxnidIsSet(false);
+    this.txnid = 0;
+    this.replPolicy = null;
+  }
+
+  public long getTxnid() {
+    return this.txnid;
+  }
+
+  public void setTxnid(long txnid) {
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+  }
+
+  public void unsetTxnid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  /** Returns true if field txnid is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnid() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  public void setTxnidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+  }
+
+  public String getReplPolicy() {
+    return this.replPolicy;
+  }
+
+  public void setReplPolicy(String replPolicy) {
+    this.replPolicy = replPolicy;
+  }
+
+  public void unsetReplPolicy() {
+    this.replPolicy = null;
+  }
+
+  /** Returns true if field replPolicy is set (has been assigned a value) and false otherwise */
+  public boolean isSetReplPolicy() {
+    return this.replPolicy != null;
+  }
+
+  public void setReplPolicyIsSet(boolean value) {
+    if (!value) {
+      this.replPolicy = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TXNID:
+      if (value == null) {
+        unsetTxnid();
+      } else {
+        setTxnid((Long)value);
+      }
+      break;
+
+    case REPL_POLICY:
+      if (value == null) {
+        unsetReplPolicy();
+      } else {
+        setReplPolicy((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TXNID:
+      return getTxnid();
+
+    case REPL_POLICY:
+      return getReplPolicy();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TXNID:
+      return isSetTxnid();
+    case REPL_POLICY:
+      return isSetReplPolicy();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AbortTxnRequest)
+      return this.equals((AbortTxnRequest)that);
+    return false;
+  }
+
+  public boolean equals(AbortTxnRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_txnid = true;
+    boolean that_present_txnid = true;
+    if (this_present_txnid || that_present_txnid) {
+      if (!(this_present_txnid && that_present_txnid))
+        return false;
+      if (this.txnid != that.txnid)
+        return false;
+    }
+
+    boolean this_present_replPolicy = true && this.isSetReplPolicy();
+    boolean that_present_replPolicy = true && that.isSetReplPolicy();
+    if (this_present_replPolicy || that_present_replPolicy) {
+      if (!(this_present_replPolicy && that_present_replPolicy))
+        return false;
+      if (!this.replPolicy.equals(that.replPolicy))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_txnid = true;
+    list.add(present_txnid);
+    if (present_txnid)
+      list.add(txnid);
+
+    boolean present_replPolicy = true && (isSetReplPolicy());
+    list.add(present_replPolicy);
+    if (present_replPolicy)
+      list.add(replPolicy);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AbortTxnRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTxnid()).compareTo(other.isSetTxnid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnid, other.txnid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetReplPolicy()).compareTo(other.isSetReplPolicy());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetReplPolicy()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replPolicy, other.replPolicy);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AbortTxnRequest(");
+    boolean first = true;
+
+    sb.append("txnid:");
+    sb.append(this.txnid);
+    first = false;
+    if (isSetReplPolicy()) {
+      if (!first) sb.append(", ");
+      sb.append("replPolicy:");
+      if (this.replPolicy == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.replPolicy);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTxnid()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnid' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AbortTxnRequestStandardSchemeFactory implements SchemeFactory {
+    public AbortTxnRequestStandardScheme getScheme() {
+      return new AbortTxnRequestStandardScheme();
+    }
+  }
+
+  private static class AbortTxnRequestStandardScheme extends StandardScheme<AbortTxnRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AbortTxnRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TXNID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txnid = iprot.readI64();
+              struct.setTxnidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // REPL_POLICY
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.replPolicy = iprot.readString();
+              struct.setReplPolicyIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AbortTxnRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(TXNID_FIELD_DESC);
+      oprot.writeI64(struct.txnid);
+      oprot.writeFieldEnd();
+      if (struct.replPolicy != null) {
+        if (struct.isSetReplPolicy()) {
+          oprot.writeFieldBegin(REPL_POLICY_FIELD_DESC);
+          oprot.writeString(struct.replPolicy);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AbortTxnRequestTupleSchemeFactory implements SchemeFactory {
+    public AbortTxnRequestTupleScheme getScheme() {
+      return new AbortTxnRequestTupleScheme();
+    }
+  }
+
+  private static class AbortTxnRequestTupleScheme extends TupleScheme<AbortTxnRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.txnid);
+      BitSet optionals = new BitSet();
+      if (struct.isSetReplPolicy()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetReplPolicy()) {
+        oprot.writeString(struct.replPolicy);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.txnid = iprot.readI64();
+      struct.setTxnidIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.replPolicy = iprot.readString();
+        struct.setReplPolicyIsSet(true);
+      }
+    }
+  }
+
+}
+
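A minimal usage sketch for the struct above (the transaction id and policy string
are illustrative values):

    AbortTxnRequest req = new AbortTxnRequest(128L);  // txnid is the only required field
    req.setReplPolicy("repl-policy");                 // optional, for replication-driven aborts
    // req.validate() would now pass, since the required txnid field is set.
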

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
new file mode 100644
index 0000000..3ee3370
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AbortTxnsRequest.java
@@ -0,0 +1,438 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AbortTxnsRequest implements org.apache.thrift.TBase<AbortTxnsRequest, AbortTxnsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AbortTxnsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AbortTxnsRequest");
+
+  private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_ids", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AbortTxnsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AbortTxnsRequestTupleSchemeFactory());
+  }
+
+  private List<Long> txn_ids; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TXN_IDS((short)1, "txn_ids");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TXN_IDS
+          return TXN_IDS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TXN_IDS, new org.apache.thrift.meta_data.FieldMetaData("txn_ids", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AbortTxnsRequest.class, metaDataMap);
+  }
+
+  public AbortTxnsRequest() {
+  }
+
+  public AbortTxnsRequest(
+    List<Long> txn_ids)
+  {
+    this();
+    this.txn_ids = txn_ids;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AbortTxnsRequest(AbortTxnsRequest other) {
+    if (other.isSetTxn_ids()) {
+      List<Long> __this__txn_ids = new ArrayList<Long>(other.txn_ids);
+      this.txn_ids = __this__txn_ids;
+    }
+  }
+
+  public AbortTxnsRequest deepCopy() {
+    return new AbortTxnsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.txn_ids = null;
+  }
+
+  public int getTxn_idsSize() {
+    return (this.txn_ids == null) ? 0 : this.txn_ids.size();
+  }
+
+  public java.util.Iterator<Long> getTxn_idsIterator() {
+    return (this.txn_ids == null) ? null : this.txn_ids.iterator();
+  }
+
+  public void addToTxn_ids(long elem) {
+    if (this.txn_ids == null) {
+      this.txn_ids = new ArrayList<Long>();
+    }
+    this.txn_ids.add(elem);
+  }
+
+  public List<Long> getTxn_ids() {
+    return this.txn_ids;
+  }
+
+  public void setTxn_ids(List<Long> txn_ids) {
+    this.txn_ids = txn_ids;
+  }
+
+  public void unsetTxn_ids() {
+    this.txn_ids = null;
+  }
+
+  /** Returns true if field txn_ids is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxn_ids() {
+    return this.txn_ids != null;
+  }
+
+  public void setTxn_idsIsSet(boolean value) {
+    if (!value) {
+      this.txn_ids = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TXN_IDS:
+      if (value == null) {
+        unsetTxn_ids();
+      } else {
+        setTxn_ids((List<Long>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TXN_IDS:
+      return getTxn_ids();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TXN_IDS:
+      return isSetTxn_ids();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AbortTxnsRequest)
+      return this.equals((AbortTxnsRequest)that);
+    return false;
+  }
+
+  public boolean equals(AbortTxnsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_txn_ids = true && this.isSetTxn_ids();
+    boolean that_present_txn_ids = true && that.isSetTxn_ids();
+    if (this_present_txn_ids || that_present_txn_ids) {
+      if (!(this_present_txn_ids && that_present_txn_ids))
+        return false;
+      if (!this.txn_ids.equals(that.txn_ids))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_txn_ids = true && (isSetTxn_ids());
+    list.add(present_txn_ids);
+    if (present_txn_ids)
+      list.add(txn_ids);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AbortTxnsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTxn_ids()).compareTo(other.isSetTxn_ids());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxn_ids()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txn_ids, other.txn_ids);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AbortTxnsRequest(");
+    boolean first = true;
+
+    sb.append("txn_ids:");
+    if (this.txn_ids == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.txn_ids);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTxn_ids()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'txn_ids' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AbortTxnsRequestStandardSchemeFactory implements SchemeFactory {
+    public AbortTxnsRequestStandardScheme getScheme() {
+      return new AbortTxnsRequestStandardScheme();
+    }
+  }
+
+  private static class AbortTxnsRequestStandardScheme extends StandardScheme<AbortTxnsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AbortTxnsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TXN_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list586 = iprot.readListBegin();
+                struct.txn_ids = new ArrayList<Long>(_list586.size);
+                long _elem587;
+                for (int _i588 = 0; _i588 < _list586.size; ++_i588)
+                {
+                  _elem587 = iprot.readI64();
+                  struct.txn_ids.add(_elem587);
+                }
+                iprot.readListEnd();
+              }
+              struct.setTxn_idsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AbortTxnsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.txn_ids != null) {
+        oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size()));
+          for (long _iter589 : struct.txn_ids)
+          {
+            oprot.writeI64(_iter589);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AbortTxnsRequestTupleSchemeFactory implements SchemeFactory {
+    public AbortTxnsRequestTupleScheme getScheme() {
+      return new AbortTxnsRequestTupleScheme();
+    }
+  }
+
+  private static class AbortTxnsRequestTupleScheme extends TupleScheme<AbortTxnsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.txn_ids.size());
+        for (long _iter590 : struct.txn_ids)
+        {
+          oprot.writeI64(_iter590);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AbortTxnsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list591 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.txn_ids = new ArrayList<Long>(_list591.size);
+        long _elem592;
+        for (int _i593 = 0; _i593 < _list591.size; ++_i593)
+        {
+          _elem592 = iprot.readI64();
+          struct.txn_ids.add(_elem592);
+        }
+      }
+      struct.setTxn_idsIsSet(true);
+    }
+  }
+
+}
+
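A minimal usage sketch for the batch variant above (transaction ids are
illustrative):

    AbortTxnsRequest batch = new AbortTxnsRequest(Arrays.asList(101L, 102L, 103L));
    batch.addToTxn_ids(104L);   // the required txn_ids list can also grow incrementally
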

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java
new file mode 100644
index 0000000..02d552d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddCheckConstraintRequest.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddCheckConstraintRequest implements org.apache.thrift.TBase<AddCheckConstraintRequest, AddCheckConstraintRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AddCheckConstraintRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddCheckConstraintRequest");
+
+  private static final org.apache.thrift.protocol.TField CHECK_CONSTRAINT_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("checkConstraintCols", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AddCheckConstraintRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AddCheckConstraintRequestTupleSchemeFactory());
+  }
+
+  private List<SQLCheckConstraint> checkConstraintCols; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CHECK_CONSTRAINT_COLS((short)1, "checkConstraintCols");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CHECK_CONSTRAINT_COLS
+          return CHECK_CONSTRAINT_COLS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CHECK_CONSTRAINT_COLS, new org.apache.thrift.meta_data.FieldMetaData("checkConstraintCols", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLCheckConstraint.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddCheckConstraintRequest.class, metaDataMap);
+  }
+
+  public AddCheckConstraintRequest() {
+  }
+
+  public AddCheckConstraintRequest(
+    List<SQLCheckConstraint> checkConstraintCols)
+  {
+    this();
+    this.checkConstraintCols = checkConstraintCols;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AddCheckConstraintRequest(AddCheckConstraintRequest other) {
+    if (other.isSetCheckConstraintCols()) {
+      List<SQLCheckConstraint> __this__checkConstraintCols = new ArrayList<SQLCheckConstraint>(other.checkConstraintCols.size());
+      for (SQLCheckConstraint other_element : other.checkConstraintCols) {
+        __this__checkConstraintCols.add(new SQLCheckConstraint(other_element));
+      }
+      this.checkConstraintCols = __this__checkConstraintCols;
+    }
+  }
+
+  public AddCheckConstraintRequest deepCopy() {
+    return new AddCheckConstraintRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.checkConstraintCols = null;
+  }
+
+  public int getCheckConstraintColsSize() {
+    return (this.checkConstraintCols == null) ? 0 : this.checkConstraintCols.size();
+  }
+
+  public java.util.Iterator<SQLCheckConstraint> getCheckConstraintColsIterator() {
+    return (this.checkConstraintCols == null) ? null : this.checkConstraintCols.iterator();
+  }
+
+  public void addToCheckConstraintCols(SQLCheckConstraint elem) {
+    if (this.checkConstraintCols == null) {
+      this.checkConstraintCols = new ArrayList<SQLCheckConstraint>();
+    }
+    this.checkConstraintCols.add(elem);
+  }
+
+  public List<SQLCheckConstraint> getCheckConstraintCols() {
+    return this.checkConstraintCols;
+  }
+
+  public void setCheckConstraintCols(List<SQLCheckConstraint> checkConstraintCols) {
+    this.checkConstraintCols = checkConstraintCols;
+  }
+
+  public void unsetCheckConstraintCols() {
+    this.checkConstraintCols = null;
+  }
+
+  /** Returns true if field checkConstraintCols is set (has been assigned a value) and false otherwise */
+  public boolean isSetCheckConstraintCols() {
+    return this.checkConstraintCols != null;
+  }
+
+  public void setCheckConstraintColsIsSet(boolean value) {
+    if (!value) {
+      this.checkConstraintCols = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CHECK_CONSTRAINT_COLS:
+      if (value == null) {
+        unsetCheckConstraintCols();
+      } else {
+        setCheckConstraintCols((List<SQLCheckConstraint>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CHECK_CONSTRAINT_COLS:
+      return getCheckConstraintCols();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CHECK_CONSTRAINT_COLS:
+      return isSetCheckConstraintCols();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AddCheckConstraintRequest)
+      return this.equals((AddCheckConstraintRequest)that);
+    return false;
+  }
+
+  public boolean equals(AddCheckConstraintRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_checkConstraintCols = true && this.isSetCheckConstraintCols();
+    boolean that_present_checkConstraintCols = true && that.isSetCheckConstraintCols();
+    if (this_present_checkConstraintCols || that_present_checkConstraintCols) {
+      if (!(this_present_checkConstraintCols && that_present_checkConstraintCols))
+        return false;
+      if (!this.checkConstraintCols.equals(that.checkConstraintCols))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_checkConstraintCols = true && (isSetCheckConstraintCols());
+    list.add(present_checkConstraintCols);
+    if (present_checkConstraintCols)
+      list.add(checkConstraintCols);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AddCheckConstraintRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCheckConstraintCols()).compareTo(other.isSetCheckConstraintCols());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCheckConstraintCols()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.checkConstraintCols, other.checkConstraintCols);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AddCheckConstraintRequest(");
+    boolean first = true;
+
+    sb.append("checkConstraintCols:");
+    if (this.checkConstraintCols == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.checkConstraintCols);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetCheckConstraintCols()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'checkConstraintCols' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AddCheckConstraintRequestStandardSchemeFactory implements SchemeFactory {
+    public AddCheckConstraintRequestStandardScheme getScheme() {
+      return new AddCheckConstraintRequestStandardScheme();
+    }
+  }
+
+  private static class AddCheckConstraintRequestStandardScheme extends StandardScheme<AddCheckConstraintRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AddCheckConstraintRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CHECK_CONSTRAINT_COLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list408 = iprot.readListBegin();
+                struct.checkConstraintCols = new ArrayList<SQLCheckConstraint>(_list408.size);
+                SQLCheckConstraint _elem409;
+                for (int _i410 = 0; _i410 < _list408.size; ++_i410)
+                {
+                  _elem409 = new SQLCheckConstraint();
+                  _elem409.read(iprot);
+                  struct.checkConstraintCols.add(_elem409);
+                }
+                iprot.readListEnd();
+              }
+              struct.setCheckConstraintColsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AddCheckConstraintRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.checkConstraintCols != null) {
+        oprot.writeFieldBegin(CHECK_CONSTRAINT_COLS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraintCols.size()));
+          for (SQLCheckConstraint _iter411 : struct.checkConstraintCols)
+          {
+            _iter411.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AddCheckConstraintRequestTupleSchemeFactory implements SchemeFactory {
+    public AddCheckConstraintRequestTupleScheme getScheme() {
+      return new AddCheckConstraintRequestTupleScheme();
+    }
+  }
+
+  private static class AddCheckConstraintRequestTupleScheme extends TupleScheme<AddCheckConstraintRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.checkConstraintCols.size());
+        for (SQLCheckConstraint _iter412 : struct.checkConstraintCols)
+        {
+          _iter412.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AddCheckConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list413 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.checkConstraintCols = new ArrayList<SQLCheckConstraint>(_list413.size);
+        SQLCheckConstraint _elem414;
+        for (int _i415 = 0; _i415 < _list413.size; ++_i415)
+        {
+          _elem414 = new SQLCheckConstraint();
+          _elem414.read(iprot);
+          struct.checkConstraintCols.add(_elem414);
+        }
+      }
+      struct.setCheckConstraintColsIsSet(true);
+    }
+  }
+
+}
+
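
For readers skimming the generated beans: AddCheckConstraintRequest is a thin wrapper around one required list field, and the AddDefaultConstraintRequest struct that follows has the identical shape. Below is a minimal usage sketch of the generated API, assuming nothing beyond the methods shown above; the empty SQLCheckConstraint element is a placeholder, and real callers would populate it through its setters.

  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.api.AddCheckConstraintRequest;
  import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;

  public class ConstraintRequestSketch {
    public static void main(String[] args) throws Exception {
      List<SQLCheckConstraint> cols = new ArrayList<>();
      cols.add(new SQLCheckConstraint());              // placeholder element; populate via setters
      AddCheckConstraintRequest req = new AddCheckConstraintRequest(cols);
      req.validate();                                  // passes: required 'checkConstraintCols' is set
      AddCheckConstraintRequest copy = req.deepCopy(); // element-wise deep copy
      System.out.println(req.equals(copy));            // true
      try {
        new AddCheckConstraintRequest().validate();    // required field unset
      } catch (org.apache.thrift.TException e) {
        System.out.println("rejected: " + e.getMessage());
      }
    }
  }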

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java
new file mode 100644
index 0000000..6acc6f8
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDefaultConstraintRequest.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddDefaultConstraintRequest implements org.apache.thrift.TBase<AddDefaultConstraintRequest, AddDefaultConstraintRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AddDefaultConstraintRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddDefaultConstraintRequest");
+
+  private static final org.apache.thrift.protocol.TField DEFAULT_CONSTRAINT_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultConstraintCols", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AddDefaultConstraintRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AddDefaultConstraintRequestTupleSchemeFactory());
+  }
+
+  private List<SQLDefaultConstraint> defaultConstraintCols; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DEFAULT_CONSTRAINT_COLS((short)1, "defaultConstraintCols");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DEFAULT_CONSTRAINT_COLS
+          return DEFAULT_CONSTRAINT_COLS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DEFAULT_CONSTRAINT_COLS, new org.apache.thrift.meta_data.FieldMetaData("defaultConstraintCols", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLDefaultConstraint.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddDefaultConstraintRequest.class, metaDataMap);
+  }
+
+  public AddDefaultConstraintRequest() {
+  }
+
+  public AddDefaultConstraintRequest(
+    List<SQLDefaultConstraint> defaultConstraintCols)
+  {
+    this();
+    this.defaultConstraintCols = defaultConstraintCols;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AddDefaultConstraintRequest(AddDefaultConstraintRequest other) {
+    if (other.isSetDefaultConstraintCols()) {
+      List<SQLDefaultConstraint> __this__defaultConstraintCols = new ArrayList<SQLDefaultConstraint>(other.defaultConstraintCols.size());
+      for (SQLDefaultConstraint other_element : other.defaultConstraintCols) {
+        __this__defaultConstraintCols.add(new SQLDefaultConstraint(other_element));
+      }
+      this.defaultConstraintCols = __this__defaultConstraintCols;
+    }
+  }
+
+  public AddDefaultConstraintRequest deepCopy() {
+    return new AddDefaultConstraintRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.defaultConstraintCols = null;
+  }
+
+  public int getDefaultConstraintColsSize() {
+    return (this.defaultConstraintCols == null) ? 0 : this.defaultConstraintCols.size();
+  }
+
+  public java.util.Iterator<SQLDefaultConstraint> getDefaultConstraintColsIterator() {
+    return (this.defaultConstraintCols == null) ? null : this.defaultConstraintCols.iterator();
+  }
+
+  public void addToDefaultConstraintCols(SQLDefaultConstraint elem) {
+    if (this.defaultConstraintCols == null) {
+      this.defaultConstraintCols = new ArrayList<SQLDefaultConstraint>();
+    }
+    this.defaultConstraintCols.add(elem);
+  }
+
+  public List<SQLDefaultConstraint> getDefaultConstraintCols() {
+    return this.defaultConstraintCols;
+  }
+
+  public void setDefaultConstraintCols(List<SQLDefaultConstraint> defaultConstraintCols) {
+    this.defaultConstraintCols = defaultConstraintCols;
+  }
+
+  public void unsetDefaultConstraintCols() {
+    this.defaultConstraintCols = null;
+  }
+
+  /** Returns true if field defaultConstraintCols is set (has been assigned a value) and false otherwise */
+  public boolean isSetDefaultConstraintCols() {
+    return this.defaultConstraintCols != null;
+  }
+
+  public void setDefaultConstraintColsIsSet(boolean value) {
+    if (!value) {
+      this.defaultConstraintCols = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DEFAULT_CONSTRAINT_COLS:
+      if (value == null) {
+        unsetDefaultConstraintCols();
+      } else {
+        setDefaultConstraintCols((List<SQLDefaultConstraint>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DEFAULT_CONSTRAINT_COLS:
+      return getDefaultConstraintCols();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DEFAULT_CONSTRAINT_COLS:
+      return isSetDefaultConstraintCols();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AddDefaultConstraintRequest)
+      return this.equals((AddDefaultConstraintRequest)that);
+    return false;
+  }
+
+  public boolean equals(AddDefaultConstraintRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_defaultConstraintCols = true && this.isSetDefaultConstraintCols();
+    boolean that_present_defaultConstraintCols = true && that.isSetDefaultConstraintCols();
+    if (this_present_defaultConstraintCols || that_present_defaultConstraintCols) {
+      if (!(this_present_defaultConstraintCols && that_present_defaultConstraintCols))
+        return false;
+      if (!this.defaultConstraintCols.equals(that.defaultConstraintCols))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_defaultConstraintCols = true && (isSetDefaultConstraintCols());
+    list.add(present_defaultConstraintCols);
+    if (present_defaultConstraintCols)
+      list.add(defaultConstraintCols);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AddDefaultConstraintRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDefaultConstraintCols()).compareTo(other.isSetDefaultConstraintCols());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDefaultConstraintCols()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.defaultConstraintCols, other.defaultConstraintCols);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AddDefaultConstraintRequest(");
+    boolean first = true;
+
+    sb.append("defaultConstraintCols:");
+    if (this.defaultConstraintCols == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.defaultConstraintCols);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDefaultConstraintCols()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'defaultConstraintCols' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AddDefaultConstraintRequestStandardSchemeFactory implements SchemeFactory {
+    public AddDefaultConstraintRequestStandardScheme getScheme() {
+      return new AddDefaultConstraintRequestStandardScheme();
+    }
+  }
+
+  private static class AddDefaultConstraintRequestStandardScheme extends StandardScheme<AddDefaultConstraintRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DEFAULT_CONSTRAINT_COLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list400 = iprot.readListBegin();
+                struct.defaultConstraintCols = new ArrayList<SQLDefaultConstraint>(_list400.size);
+                SQLDefaultConstraint _elem401;
+                for (int _i402 = 0; _i402 < _list400.size; ++_i402)
+                {
+                  _elem401 = new SQLDefaultConstraint();
+                  _elem401.read(iprot);
+                  struct.defaultConstraintCols.add(_elem401);
+                }
+                iprot.readListEnd();
+              }
+              struct.setDefaultConstraintColsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.defaultConstraintCols != null) {
+        oprot.writeFieldBegin(DEFAULT_CONSTRAINT_COLS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraintCols.size()));
+          for (SQLDefaultConstraint _iter403 : struct.defaultConstraintCols)
+          {
+            _iter403.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AddDefaultConstraintRequestTupleSchemeFactory implements SchemeFactory {
+    public AddDefaultConstraintRequestTupleScheme getScheme() {
+      return new AddDefaultConstraintRequestTupleScheme();
+    }
+  }
+
+  private static class AddDefaultConstraintRequestTupleScheme extends TupleScheme<AddDefaultConstraintRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.defaultConstraintCols.size());
+        for (SQLDefaultConstraint _iter404 : struct.defaultConstraintCols)
+        {
+          _iter404.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AddDefaultConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list405 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.defaultConstraintCols = new ArrayList<SQLDefaultConstraint>(_list405.size);
+        SQLDefaultConstraint _elem406;
+        for (int _i407 = 0; _i407 < _list405.size; ++_i407)
+        {
+          _elem406 = new SQLDefaultConstraint();
+          _elem406.read(iprot);
+          struct.defaultConstraintCols.add(_elem406);
+        }
+      }
+      struct.setDefaultConstraintColsIsSet(true);
+    }
+  }
+
+}
+


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
new file mode 100644
index 0000000..a868b63
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Order.java
@@ -0,0 +1,497 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Order implements org.apache.thrift.TBase<Order, Order._Fields>, java.io.Serializable, Cloneable, Comparable<Order> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Order");
+
+  private static final org.apache.thrift.protocol.TField COL_FIELD_DESC = new org.apache.thrift.protocol.TField("col", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ORDER_FIELD_DESC = new org.apache.thrift.protocol.TField("order", org.apache.thrift.protocol.TType.I32, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new OrderStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new OrderTupleSchemeFactory());
+  }
+
+  private String col; // required
+  private int order; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    COL((short)1, "col"),
+    ORDER((short)2, "order");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // COL
+          return COL;
+        case 2: // ORDER
+          return ORDER;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ORDER_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.COL, new org.apache.thrift.meta_data.FieldMetaData("col", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ORDER, new org.apache.thrift.meta_data.FieldMetaData("order", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Order.class, metaDataMap);
+  }
+
+  public Order() {
+  }
+
+  public Order(
+    String col,
+    int order)
+  {
+    this();
+    this.col = col;
+    this.order = order;
+    setOrderIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public Order(Order other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetCol()) {
+      this.col = other.col;
+    }
+    this.order = other.order;
+  }
+
+  public Order deepCopy() {
+    return new Order(this);
+  }
+
+  @Override
+  public void clear() {
+    this.col = null;
+    setOrderIsSet(false);
+    this.order = 0;
+  }
+
+  public String getCol() {
+    return this.col;
+  }
+
+  public void setCol(String col) {
+    this.col = col;
+  }
+
+  public void unsetCol() {
+    this.col = null;
+  }
+
+  /** Returns true if field col is set (has been assigned a value) and false otherwise */
+  public boolean isSetCol() {
+    return this.col != null;
+  }
+
+  public void setColIsSet(boolean value) {
+    if (!value) {
+      this.col = null;
+    }
+  }
+
+  public int getOrder() {
+    return this.order;
+  }
+
+  public void setOrder(int order) {
+    this.order = order;
+    setOrderIsSet(true);
+  }
+
+  public void unsetOrder() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ORDER_ISSET_ID);
+  }
+
+  /** Returns true if field order is set (has been assigned a value) and false otherwise */
+  public boolean isSetOrder() {
+    return EncodingUtils.testBit(__isset_bitfield, __ORDER_ISSET_ID);
+  }
+
+  public void setOrderIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ORDER_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case COL:
+      if (value == null) {
+        unsetCol();
+      } else {
+        setCol((String)value);
+      }
+      break;
+
+    case ORDER:
+      if (value == null) {
+        unsetOrder();
+      } else {
+        setOrder((Integer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case COL:
+      return getCol();
+
+    case ORDER:
+      return getOrder();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case COL:
+      return isSetCol();
+    case ORDER:
+      return isSetOrder();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof Order)
+      return this.equals((Order)that);
+    return false;
+  }
+
+  public boolean equals(Order that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_col = true && this.isSetCol();
+    boolean that_present_col = true && that.isSetCol();
+    if (this_present_col || that_present_col) {
+      if (!(this_present_col && that_present_col))
+        return false;
+      if (!this.col.equals(that.col))
+        return false;
+    }
+
+    boolean this_present_order = true;
+    boolean that_present_order = true;
+    if (this_present_order || that_present_order) {
+      if (!(this_present_order && that_present_order))
+        return false;
+      if (this.order != that.order)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_col = true && (isSetCol());
+    list.add(present_col);
+    if (present_col)
+      list.add(col);
+
+    boolean present_order = true;
+    list.add(present_order);
+    if (present_order)
+      list.add(order);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(Order other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCol()).compareTo(other.isSetCol());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCol()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.col, other.col);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOrder()).compareTo(other.isSetOrder());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOrder()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.order, other.order);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Order(");
+    boolean first = true;
+
+    sb.append("col:");
+    if (this.col == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.col);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("order:");
+    sb.append(this.order);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class OrderStandardSchemeFactory implements SchemeFactory {
+    public OrderStandardScheme getScheme() {
+      return new OrderStandardScheme();
+    }
+  }
+
+  private static class OrderStandardScheme extends StandardScheme<Order> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Order struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // COL
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.col = iprot.readString();
+              struct.setColIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // ORDER
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.order = iprot.readI32();
+              struct.setOrderIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Order struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.col != null) {
+        oprot.writeFieldBegin(COL_FIELD_DESC);
+        oprot.writeString(struct.col);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(ORDER_FIELD_DESC);
+      oprot.writeI32(struct.order);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class OrderTupleSchemeFactory implements SchemeFactory {
+    public OrderTupleScheme getScheme() {
+      return new OrderTupleScheme();
+    }
+  }
+
+  private static class OrderTupleScheme extends TupleScheme<Order> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, Order struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetCol()) {
+        optionals.set(0);
+      }
+      if (struct.isSetOrder()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetCol()) {
+        oprot.writeString(struct.col);
+      }
+      if (struct.isSetOrder()) {
+        oprot.writeI32(struct.order);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, Order struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.col = iprot.readString();
+        struct.setColIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.order = iprot.readI32();
+        struct.setOrderIsSet(true);
+      }
+    }
+  }
+
+}
+
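
Order pairs a column name with an integer direction flag. Below is a minimal round-trip sketch using libthrift's generic TSerializer/TDeserializer helpers (present in Thrift 0.9.3); TCompactProtocol mirrors the generated writeObject/readObject hooks above. The column name and the reading of order=1 as ascending are illustrative assumptions.

  import org.apache.hadoop.hive.metastore.api.Order;
  import org.apache.thrift.TDeserializer;
  import org.apache.thrift.TSerializer;
  import org.apache.thrift.protocol.TCompactProtocol;

  public class OrderRoundTrip {
    public static void main(String[] args) throws Exception {
      Order in = new Order("ts", 1);   // assumed: 1 = ascending sort on column "ts"
      byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(in);

      Order out = new Order();
      new TDeserializer(new TCompactProtocol.Factory()).deserialize(out, wire);

      System.out.println(out);             // Order(col:ts, order:1)
      System.out.println(in.equals(out));  // true: both fields survive the round trip
    }
  }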

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
new file mode 100644
index 0000000..51f809a
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
@@ -0,0 +1,1335 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Partition implements org.apache.thrift.TBase<Partition, Partition._Fields>, java.io.Serializable, Cloneable, Comparable<Partition> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Partition");
+
+  private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField LAST_ACCESS_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("lastAccessTime", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField SD_FIELD_DESC = new org.apache.thrift.protocol.TField("sd", org.apache.thrift.protocol.TType.STRUCT, (short)6);
+  private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)7);
+  private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)8);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionTupleSchemeFactory());
+  }
+
+  private List<String> values; // required
+  private String dbName; // required
+  private String tableName; // required
+  private int createTime; // required
+  private int lastAccessTime; // required
+  private StorageDescriptor sd; // required
+  private Map<String,String> parameters; // required
+  private PrincipalPrivilegeSet privileges; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    VALUES((short)1, "values"),
+    DB_NAME((short)2, "dbName"),
+    TABLE_NAME((short)3, "tableName"),
+    CREATE_TIME((short)4, "createTime"),
+    LAST_ACCESS_TIME((short)5, "lastAccessTime"),
+    SD((short)6, "sd"),
+    PARAMETERS((short)7, "parameters"),
+    PRIVILEGES((short)8, "privileges"),
+    CAT_NAME((short)9, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // VALUES
+          return VALUES;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // TABLE_NAME
+          return TABLE_NAME;
+        case 4: // CREATE_TIME
+          return CREATE_TIME;
+        case 5: // LAST_ACCESS_TIME
+          return LAST_ACCESS_TIME;
+        case 6: // SD
+          return SD;
+        case 7: // PARAMETERS
+          return PARAMETERS;
+        case 8: // PRIVILEGES
+          return PRIVILEGES;
+        case 9: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __CREATETIME_ISSET_ID = 0;
+  private static final int __LASTACCESSTIME_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.LAST_ACCESS_TIME, new org.apache.thrift.meta_data.FieldMetaData("lastAccessTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.SD, new org.apache.thrift.meta_data.FieldMetaData("sd", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StorageDescriptor.class)));
+    tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Partition.class, metaDataMap);
+  }
+
+  public Partition() {
+  }
+
+  public Partition(
+    List<String> values,
+    String dbName,
+    String tableName,
+    int createTime,
+    int lastAccessTime,
+    StorageDescriptor sd,
+    Map<String,String> parameters)
+  {
+    this();
+    this.values = values;
+    this.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(dbName);
+    this.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(tableName);
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+    this.lastAccessTime = lastAccessTime;
+    setLastAccessTimeIsSet(true);
+    this.sd = sd;
+    this.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(parameters);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public Partition(Partition other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetValues()) {
+      List<String> __this__values = new ArrayList<String>(other.values);
+      this.values = __this__values;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.dbName);
+    }
+    if (other.isSetTableName()) {
+      this.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.tableName);
+    }
+    this.createTime = other.createTime;
+    this.lastAccessTime = other.lastAccessTime;
+    if (other.isSetSd()) {
+      this.sd = new StorageDescriptor(other.sd);
+    }
+    if (other.isSetParameters()) {
+      Map<String,String> __this__parameters = new HashMap<String,String>(other.parameters);
+      this.parameters = __this__parameters;
+    }
+    if (other.isSetPrivileges()) {
+      this.privileges = new PrincipalPrivilegeSet(other.privileges);
+    }
+    if (other.isSetCatName()) {
+      this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.catName);
+    }
+  }
+
+  public Partition deepCopy() {
+    return new Partition(this);
+  }
+
+  @Override
+  public void clear() {
+    this.values = null;
+    this.dbName = null;
+    this.tableName = null;
+    setCreateTimeIsSet(false);
+    this.createTime = 0;
+    setLastAccessTimeIsSet(false);
+    this.lastAccessTime = 0;
+    this.sd = null;
+    this.parameters = null;
+    this.privileges = null;
+    this.catName = null;
+  }
+
+  public int getValuesSize() {
+    return (this.values == null) ? 0 : this.values.size();
+  }
+
+  public java.util.Iterator<String> getValuesIterator() {
+    return (this.values == null) ? null : this.values.iterator();
+  }
+
+  public void addToValues(String elem) {
+    if (this.values == null) {
+      this.values = new ArrayList<String>();
+    }
+    this.values.add(elem);
+  }
+
+  public List<String> getValues() {
+    return this.values;
+  }
+
+  public void setValues(List<String> values) {
+    this.values = values;
+  }
+
+  public void unsetValues() {
+    this.values = null;
+  }
+
+  /** Returns true if field values is set (has been assigned a value) and false otherwise */
+  public boolean isSetValues() {
+    return this.values != null;
+  }
+
+  public void setValuesIsSet(boolean value) {
+    if (!value) {
+      this.values = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(dbName);
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(tableName);
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  public int getCreateTime() {
+    return this.createTime;
+  }
+
+  public void setCreateTime(int createTime) {
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+  }
+
+  public void unsetCreateTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  /** Returns true if field createTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetCreateTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  public void setCreateTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
+  }
+
+  public int getLastAccessTime() {
+    return this.lastAccessTime;
+  }
+
+  public void setLastAccessTime(int lastAccessTime) {
+    this.lastAccessTime = lastAccessTime;
+    setLastAccessTimeIsSet(true);
+  }
+
+  public void unsetLastAccessTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID);
+  }
+
+  /** Returns true if field lastAccessTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetLastAccessTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID);
+  }
+
+  public void setLastAccessTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID, value);
+  }
+
+  public StorageDescriptor getSd() {
+    return this.sd;
+  }
+
+  public void setSd(StorageDescriptor sd) {
+    this.sd = sd;
+  }
+
+  public void unsetSd() {
+    this.sd = null;
+  }
+
+  /** Returns true if field sd is set (has been assigned a value) and false otherwise */
+  public boolean isSetSd() {
+    return this.sd != null;
+  }
+
+  public void setSdIsSet(boolean value) {
+    if (!value) {
+      this.sd = null;
+    }
+  }
+
+  public int getParametersSize() {
+    return (this.parameters == null) ? 0 : this.parameters.size();
+  }
+
+  public void putToParameters(String key, String val) {
+    if (this.parameters == null) {
+      this.parameters = new HashMap<String,String>();
+    }
+    this.parameters.put(org.apache.hadoop.hive.metastore.utils.StringUtils.intern(key), org.apache.hadoop.hive.metastore.utils.StringUtils.intern(val));
+  }
+
+  public Map<String,String> getParameters() {
+    return this.parameters;
+  }
+
+  public void setParameters(Map<String,String> parameters) {
+    this.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(parameters);
+  }
+
+  public void unsetParameters() {
+    this.parameters = null;
+  }
+
+  /** Returns true if field parameters is set (has been assigned a value) and false otherwise */
+  public boolean isSetParameters() {
+    return this.parameters != null;
+  }
+
+  public void setParametersIsSet(boolean value) {
+    if (!value) {
+      this.parameters = null;
+    }
+  }
+
+  public PrincipalPrivilegeSet getPrivileges() {
+    return this.privileges;
+  }
+
+  public void setPrivileges(PrincipalPrivilegeSet privileges) {
+    this.privileges = privileges;
+  }
+
+  public void unsetPrivileges() {
+    this.privileges = null;
+  }
+
+  /** Returns true if field privileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrivileges() {
+    return this.privileges != null;
+  }
+
+  public void setPrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.privileges = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(catName);
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case VALUES:
+      if (value == null) {
+        unsetValues();
+      } else {
+        setValues((List<String>)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case CREATE_TIME:
+      if (value == null) {
+        unsetCreateTime();
+      } else {
+        setCreateTime((Integer)value);
+      }
+      break;
+
+    case LAST_ACCESS_TIME:
+      if (value == null) {
+        unsetLastAccessTime();
+      } else {
+        setLastAccessTime((Integer)value);
+      }
+      break;
+
+    case SD:
+      if (value == null) {
+        unsetSd();
+      } else {
+        setSd((StorageDescriptor)value);
+      }
+      break;
+
+    case PARAMETERS:
+      if (value == null) {
+        unsetParameters();
+      } else {
+        setParameters((Map<String,String>)value);
+      }
+      break;
+
+    case PRIVILEGES:
+      if (value == null) {
+        unsetPrivileges();
+      } else {
+        setPrivileges((PrincipalPrivilegeSet)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case VALUES:
+      return getValues();
+
+    case DB_NAME:
+      return getDbName();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case CREATE_TIME:
+      return getCreateTime();
+
+    case LAST_ACCESS_TIME:
+      return getLastAccessTime();
+
+    case SD:
+      return getSd();
+
+    case PARAMETERS:
+      return getParameters();
+
+    case PRIVILEGES:
+      return getPrivileges();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case VALUES:
+      return isSetValues();
+    case DB_NAME:
+      return isSetDbName();
+    case TABLE_NAME:
+      return isSetTableName();
+    case CREATE_TIME:
+      return isSetCreateTime();
+    case LAST_ACCESS_TIME:
+      return isSetLastAccessTime();
+    case SD:
+      return isSetSd();
+    case PARAMETERS:
+      return isSetParameters();
+    case PRIVILEGES:
+      return isSetPrivileges();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof Partition)
+      return this.equals((Partition)that);
+    return false;
+  }
+
+  public boolean equals(Partition that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_values = true && this.isSetValues();
+    boolean that_present_values = true && that.isSetValues();
+    if (this_present_values || that_present_values) {
+      if (!(this_present_values && that_present_values))
+        return false;
+      if (!this.values.equals(that.values))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_createTime = true;
+    boolean that_present_createTime = true;
+    if (this_present_createTime || that_present_createTime) {
+      if (!(this_present_createTime && that_present_createTime))
+        return false;
+      if (this.createTime != that.createTime)
+        return false;
+    }
+
+    boolean this_present_lastAccessTime = true;
+    boolean that_present_lastAccessTime = true;
+    if (this_present_lastAccessTime || that_present_lastAccessTime) {
+      if (!(this_present_lastAccessTime && that_present_lastAccessTime))
+        return false;
+      if (this.lastAccessTime != that.lastAccessTime)
+        return false;
+    }
+
+    boolean this_present_sd = true && this.isSetSd();
+    boolean that_present_sd = true && that.isSetSd();
+    if (this_present_sd || that_present_sd) {
+      if (!(this_present_sd && that_present_sd))
+        return false;
+      if (!this.sd.equals(that.sd))
+        return false;
+    }
+
+    boolean this_present_parameters = true && this.isSetParameters();
+    boolean that_present_parameters = true && that.isSetParameters();
+    if (this_present_parameters || that_present_parameters) {
+      if (!(this_present_parameters && that_present_parameters))
+        return false;
+      if (!this.parameters.equals(that.parameters))
+        return false;
+    }
+
+    boolean this_present_privileges = true && this.isSetPrivileges();
+    boolean that_present_privileges = true && that.isSetPrivileges();
+    if (this_present_privileges || that_present_privileges) {
+      if (!(this_present_privileges && that_present_privileges))
+        return false;
+      if (!this.privileges.equals(that.privileges))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_values = true && (isSetValues());
+    list.add(present_values);
+    if (present_values)
+      list.add(values);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tableName = true && (isSetTableName());
+    list.add(present_tableName);
+    if (present_tableName)
+      list.add(tableName);
+
+    boolean present_createTime = true;
+    list.add(present_createTime);
+    if (present_createTime)
+      list.add(createTime);
+
+    boolean present_lastAccessTime = true;
+    list.add(present_lastAccessTime);
+    if (present_lastAccessTime)
+      list.add(lastAccessTime);
+
+    boolean present_sd = true && (isSetSd());
+    list.add(present_sd);
+    if (present_sd)
+      list.add(sd);
+
+    boolean present_parameters = true && (isSetParameters());
+    list.add(present_parameters);
+    if (present_parameters)
+      list.add(parameters);
+
+    boolean present_privileges = true && (isSetPrivileges());
+    list.add(present_privileges);
+    if (present_privileges)
+      list.add(privileges);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(Partition other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValues()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCreateTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetLastAccessTime()).compareTo(other.isSetLastAccessTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLastAccessTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lastAccessTime, other.lastAccessTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSd()).compareTo(other.isSetSd());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSd()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sd, other.sd);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetParameters()).compareTo(other.isSetParameters());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetParameters()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parameters, other.parameters);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrivileges()).compareTo(other.isSetPrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privileges, other.privileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Partition(");
+    boolean first = true;
+
+    sb.append("values:");
+    if (this.values == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.values);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tableName:");
+    if (this.tableName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("createTime:");
+    sb.append(this.createTime);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("lastAccessTime:");
+    sb.append(this.lastAccessTime);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("sd:");
+    if (this.sd == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.sd);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("parameters:");
+    if (this.parameters == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.parameters);
+    }
+    first = false;
+    if (isSetPrivileges()) {
+      if (!first) sb.append(", ");
+      sb.append("privileges:");
+      if (this.privileges == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.privileges);
+      }
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (sd != null) {
+      sd.validate();
+    }
+    if (privileges != null) {
+      privileges.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionStandardSchemeFactory implements SchemeFactory {
+    public PartitionStandardScheme getScheme() {
+      return new PartitionStandardScheme();
+    }
+  }
+
+  private static class PartitionStandardScheme extends StandardScheme<Partition> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // VALUES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list216 = iprot.readListBegin();
+                struct.values = new ArrayList<String>(_list216.size);
+                String _elem217;
+                for (int _i218 = 0; _i218 < _list216.size; ++_i218)
+                {
+                  _elem217 = iprot.readString();
+                  struct.values.add(_elem217);
+                }
+                iprot.readListEnd();
+              }
+              struct.setValuesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // CREATE_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.createTime = iprot.readI32();
+              struct.setCreateTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // LAST_ACCESS_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.lastAccessTime = iprot.readI32();
+              struct.setLastAccessTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // SD
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.sd = new StorageDescriptor();
+              struct.sd.read(iprot);
+              struct.setSdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // PARAMETERS
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map219 = iprot.readMapBegin();
+                struct.parameters = new HashMap<String,String>(2*_map219.size);
+                String _key220;
+                String _val221;
+                for (int _i222 = 0; _i222 < _map219.size; ++_i222)
+                {
+                  _key220 = iprot.readString();
+                  _val221 = iprot.readString();
+                  struct.parameters.put(_key220, _val221);
+                }
+                iprot.readMapEnd();
+              }
+              struct.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(struct.parameters);
+              struct.setParametersIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.privileges = new PrincipalPrivilegeSet();
+              struct.privileges.read(iprot);
+              struct.setPrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.values != null) {
+        oprot.writeFieldBegin(VALUES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size()));
+          for (String _iter223 : struct.values)
+          {
+            oprot.writeString(_iter223);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tableName != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.tableName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
+      oprot.writeI32(struct.createTime);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(LAST_ACCESS_TIME_FIELD_DESC);
+      oprot.writeI32(struct.lastAccessTime);
+      oprot.writeFieldEnd();
+      if (struct.sd != null) {
+        oprot.writeFieldBegin(SD_FIELD_DESC);
+        struct.sd.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.parameters != null) {
+        oprot.writeFieldBegin(PARAMETERS_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size()));
+          for (Map.Entry<String, String> _iter224 : struct.parameters.entrySet())
+          {
+            oprot.writeString(_iter224.getKey());
+            oprot.writeString(_iter224.getValue());
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.privileges != null) {
+        if (struct.isSetPrivileges()) {
+          oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC);
+          struct.privileges.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionTupleSchemeFactory implements SchemeFactory {
+    public PartitionTupleScheme getScheme() {
+      return new PartitionTupleScheme();
+    }
+  }
+
+  private static class PartitionTupleScheme extends TupleScheme<Partition> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetValues()) {
+        optionals.set(0);
+      }
+      if (struct.isSetDbName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetTableName()) {
+        optionals.set(2);
+      }
+      if (struct.isSetCreateTime()) {
+        optionals.set(3);
+      }
+      if (struct.isSetLastAccessTime()) {
+        optionals.set(4);
+      }
+      if (struct.isSetSd()) {
+        optionals.set(5);
+      }
+      if (struct.isSetParameters()) {
+        optionals.set(6);
+      }
+      if (struct.isSetPrivileges()) {
+        optionals.set(7);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(8);
+      }
+      oprot.writeBitSet(optionals, 9);
+      if (struct.isSetValues()) {
+        {
+          oprot.writeI32(struct.values.size());
+          for (String _iter225 : struct.values)
+          {
+            oprot.writeString(_iter225);
+          }
+        }
+      }
+      if (struct.isSetDbName()) {
+        oprot.writeString(struct.dbName);
+      }
+      if (struct.isSetTableName()) {
+        oprot.writeString(struct.tableName);
+      }
+      if (struct.isSetCreateTime()) {
+        oprot.writeI32(struct.createTime);
+      }
+      if (struct.isSetLastAccessTime()) {
+        oprot.writeI32(struct.lastAccessTime);
+      }
+      if (struct.isSetSd()) {
+        struct.sd.write(oprot);
+      }
+      if (struct.isSetParameters()) {
+        {
+          oprot.writeI32(struct.parameters.size());
+          for (Map.Entry<String, String> _iter226 : struct.parameters.entrySet())
+          {
+            oprot.writeString(_iter226.getKey());
+            oprot.writeString(_iter226.getValue());
+          }
+        }
+      }
+      if (struct.isSetPrivileges()) {
+        struct.privileges.write(oprot);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(9);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.values = new ArrayList<String>(_list227.size);
+          String _elem228;
+          for (int _i229 = 0; _i229 < _list227.size; ++_i229)
+          {
+            _elem228 = iprot.readString();
+            struct.values.add(_elem228);
+          }
+        }
+        struct.setValuesIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+        struct.setDbNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+        struct.setTableNameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.createTime = iprot.readI32();
+        struct.setCreateTimeIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.lastAccessTime = iprot.readI32();
+        struct.setLastAccessTimeIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.sd = new StorageDescriptor();
+        struct.sd.read(iprot);
+        struct.setSdIsSet(true);
+      }
+      if (incoming.get(6)) {
+        {
+          org.apache.thrift.protocol.TMap _map230 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.parameters = new HashMap<String,String>(2*_map230.size);
+          String _key231;
+          String _val232;
+          for (int _i233 = 0; _i233 < _map230.size; ++_i233)
+          {
+            _key231 = iprot.readString();
+            _val232 = iprot.readString();
+            struct.parameters.put(_key231, _val232);
+          }
+        }
+        struct.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(struct.parameters);
+        struct.setParametersIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.privileges = new PrincipalPrivilegeSet();
+        struct.privileges.read(iprot);
+        struct.setPrivilegesIsSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+

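As an illustrative aside, not part of the commit above: a minimal sketch of how
the generated Partition bean might be populated and inspected, assuming a
StorageDescriptor built elsewhere. Per the generated code, the all-args
constructor interns dbName, tableName, and the parameter map; all sample values
below are hypothetical.

import java.util.Arrays;
import java.util.HashMap;
import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class PartitionExample {
  public static void main(String[] args) {
    Partition p = new Partition(
        Arrays.asList("2018", "07"),       // partition key values
        "default",                         // dbName (interned by the constructor)
        "web_logs",                        // tableName (interned as well)
        1531440000,                        // createTime, epoch seconds
        0,                                 // lastAccessTime
        new StorageDescriptor(),           // sd; normally fully populated
        new HashMap<String, String>());    // parameters
    p.putToParameters("transient_lastDdlTime", "1531440000");
    p.setCatName("hive");                  // optional catalog name field
    System.out.println(p);                 // toString() emits catName only when set
  }
}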
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionEventType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionEventType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionEventType.java
new file mode 100644
index 0000000..b515401
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionEventType.java
@@ -0,0 +1,42 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum PartitionEventType implements org.apache.thrift.TEnum {
+  LOAD_DONE(1);
+
+  private final int value;
+
+  private PartitionEventType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static PartitionEventType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return LOAD_DONE;
+      default:
+        return null;
+    }
+  }
+}

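An illustrative aside, not part of the commit: round-tripping the generated
enum through its Thrift wire value, using only the methods defined above.

import org.apache.hadoop.hive.metastore.api.PartitionEventType;

public class PartitionEventTypeExample {
  public static void main(String[] args) {
    int wire = PartitionEventType.LOAD_DONE.getValue();        // 1, per the IDL
    PartitionEventType back = PartitionEventType.findByValue(wire);
    System.out.println(back);                                  // LOAD_DONE
    System.out.println(PartitionEventType.findByValue(99));    // null for unknown ids
  }
}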
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
new file mode 100644
index 0000000..1f32e38
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
@@ -0,0 +1,449 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionListComposingSpec implements org.apache.thrift.TBase<PartitionListComposingSpec, PartitionListComposingSpec._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionListComposingSpec> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionListComposingSpec");
+
+  private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionListComposingSpecStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionListComposingSpecTupleSchemeFactory());
+  }
+
+  private List<Partition> partitions; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PARTITIONS((short)1, "partitions");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PARTITIONS
+          return PARTITIONS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionListComposingSpec.class, metaDataMap);
+  }
+
+  public PartitionListComposingSpec() {
+  }
+
+  public PartitionListComposingSpec(
+    List<Partition> partitions)
+  {
+    this();
+    this.partitions = partitions;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionListComposingSpec(PartitionListComposingSpec other) {
+    if (other.isSetPartitions()) {
+      List<Partition> __this__partitions = new ArrayList<Partition>(other.partitions.size());
+      for (Partition other_element : other.partitions) {
+        __this__partitions.add(new Partition(other_element));
+      }
+      this.partitions = __this__partitions;
+    }
+  }
+
+  public PartitionListComposingSpec deepCopy() {
+    return new PartitionListComposingSpec(this);
+  }
+
+  @Override
+  public void clear() {
+    this.partitions = null;
+  }
+
+  public int getPartitionsSize() {
+    return (this.partitions == null) ? 0 : this.partitions.size();
+  }
+
+  public java.util.Iterator<Partition> getPartitionsIterator() {
+    return (this.partitions == null) ? null : this.partitions.iterator();
+  }
+
+  public void addToPartitions(Partition elem) {
+    if (this.partitions == null) {
+      this.partitions = new ArrayList<Partition>();
+    }
+    this.partitions.add(elem);
+  }
+
+  public List<Partition> getPartitions() {
+    return this.partitions;
+  }
+
+  public void setPartitions(List<Partition> partitions) {
+    this.partitions = partitions;
+  }
+
+  public void unsetPartitions() {
+    this.partitions = null;
+  }
+
+  /** Returns true if field partitions is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitions() {
+    return this.partitions != null;
+  }
+
+  public void setPartitionsIsSet(boolean value) {
+    if (!value) {
+      this.partitions = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PARTITIONS:
+      if (value == null) {
+        unsetPartitions();
+      } else {
+        setPartitions((List<Partition>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PARTITIONS:
+      return getPartitions();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PARTITIONS:
+      return isSetPartitions();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionListComposingSpec)
+      return this.equals((PartitionListComposingSpec)that);
+    return false;
+  }
+
+  public boolean equals(PartitionListComposingSpec that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_partitions = true && this.isSetPartitions();
+    boolean that_present_partitions = true && that.isSetPartitions();
+    if (this_present_partitions || that_present_partitions) {
+      if (!(this_present_partitions && that_present_partitions))
+        return false;
+      if (!this.partitions.equals(that.partitions))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_partitions = true && (isSetPartitions());
+    list.add(present_partitions);
+    if (present_partitions)
+      list.add(partitions);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionListComposingSpec other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitions()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionListComposingSpec(");
+    boolean first = true;
+
+    sb.append("partitions:");
+    if (this.partitions == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partitions);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionListComposingSpecStandardSchemeFactory implements SchemeFactory {
+    public PartitionListComposingSpecStandardScheme getScheme() {
+      return new PartitionListComposingSpecStandardScheme();
+    }
+  }
+
+  private static class PartitionListComposingSpecStandardScheme extends StandardScheme<PartitionListComposingSpec> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionListComposingSpec struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PARTITIONS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list260 = iprot.readListBegin();
+                struct.partitions = new ArrayList<Partition>(_list260.size);
+                Partition _elem261;
+                for (int _i262 = 0; _i262 < _list260.size; ++_i262)
+                {
+                  _elem261 = new Partition();
+                  _elem261.read(iprot);
+                  struct.partitions.add(_elem261);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionListComposingSpec struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.partitions != null) {
+        oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
+          for (Partition _iter263 : struct.partitions)
+          {
+            _iter263.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionListComposingSpecTupleSchemeFactory implements SchemeFactory {
+    public PartitionListComposingSpecTupleScheme getScheme() {
+      return new PartitionListComposingSpecTupleScheme();
+    }
+  }
+
+  private static class PartitionListComposingSpecTupleScheme extends TupleScheme<PartitionListComposingSpec> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionListComposingSpec struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetPartitions()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetPartitions()) {
+        {
+          oprot.writeI32(struct.partitions.size());
+          for (Partition _iter264 : struct.partitions)
+          {
+            _iter264.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionListComposingSpec struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list265 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.partitions = new ArrayList<Partition>(_list265.size);
+          Partition _elem266;
+          for (int _i267 = 0; _i267 < _list265.size; ++_i267)
+          {
+            _elem266 = new Partition();
+            _elem266.read(iprot);
+            struct.partitions.add(_elem266);
+          }
+        }
+        struct.setPartitionsIsSet(true);
+      }
+    }
+  }
+
+}
+


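An illustrative aside, not part of the commit: building a
PartitionListComposingSpec from eagerly materialized Partition objects. The
empty Partition instances below stand in for fully populated ones.

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec;

public class ComposingSpecExample {
  public static void main(String[] args) {
    PartitionListComposingSpec spec = new PartitionListComposingSpec();
    spec.addToPartitions(new Partition());   // lazily creates the backing list
    spec.addToPartitions(new Partition());
    System.out.println(spec.getPartitionsSize());        // 2
    PartitionListComposingSpec copy = spec.deepCopy();   // element-wise deep copy
    System.out.println(copy.equals(spec));               // true: field-wise equality
  }
}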
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java
new file mode 100644
index 0000000..7b8a257
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLDefaultConstraint.java
@@ -0,0 +1,1213 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLDefaultConstraint implements org.apache.thrift.TBase<SQLDefaultConstraint, SQLDefaultConstraint._Fields>, java.io.Serializable, Cloneable, Comparable<SQLDefaultConstraint> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLDefaultConstraint");
+
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField DEFAULT_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("default_value", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField DC_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dc_name", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7);
+  private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8);
+  private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)9);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new SQLDefaultConstraintStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new SQLDefaultConstraintTupleSchemeFactory());
+  }
+
+  private String catName; // required
+  private String table_db; // required
+  private String table_name; // required
+  private String column_name; // required
+  private String default_value; // required
+  private String dc_name; // required
+  private boolean enable_cstr; // required
+  private boolean validate_cstr; // required
+  private boolean rely_cstr; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CAT_NAME((short)1, "catName"),
+    TABLE_DB((short)2, "table_db"),
+    TABLE_NAME((short)3, "table_name"),
+    COLUMN_NAME((short)4, "column_name"),
+    DEFAULT_VALUE((short)5, "default_value"),
+    DC_NAME((short)6, "dc_name"),
+    ENABLE_CSTR((short)7, "enable_cstr"),
+    VALIDATE_CSTR((short)8, "validate_cstr"),
+    RELY_CSTR((short)9, "rely_cstr");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CAT_NAME
+          return CAT_NAME;
+        case 2: // TABLE_DB
+          return TABLE_DB;
+        case 3: // TABLE_NAME
+          return TABLE_NAME;
+        case 4: // COLUMN_NAME
+          return COLUMN_NAME;
+        case 5: // DEFAULT_VALUE
+          return DEFAULT_VALUE;
+        case 6: // DC_NAME
+          return DC_NAME;
+        case 7: // ENABLE_CSTR
+          return ENABLE_CSTR;
+        case 8: // VALIDATE_CSTR
+          return VALIDATE_CSTR;
+        case 9: // RELY_CSTR
+          return RELY_CSTR;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ENABLE_CSTR_ISSET_ID = 0;
+  private static final int __VALIDATE_CSTR_ISSET_ID = 1;
+  private static final int __RELY_CSTR_ISSET_ID = 2;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("column_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DEFAULT_VALUE, new org.apache.thrift.meta_data.FieldMetaData("default_value", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DC_NAME, new org.apache.thrift.meta_data.FieldMetaData("dc_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ENABLE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("enable_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.VALIDATE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("validate_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.RELY_CSTR, new org.apache.thrift.meta_data.FieldMetaData("rely_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLDefaultConstraint.class, metaDataMap);
+  }
+
+  public SQLDefaultConstraint() {
+  }
+
+  public SQLDefaultConstraint(
+    String catName,
+    String table_db,
+    String table_name,
+    String column_name,
+    String default_value,
+    String dc_name,
+    boolean enable_cstr,
+    boolean validate_cstr,
+    boolean rely_cstr)
+  {
+    this();
+    this.catName = catName;
+    this.table_db = table_db;
+    this.table_name = table_name;
+    this.column_name = column_name;
+    this.default_value = default_value;
+    this.dc_name = dc_name;
+    this.enable_cstr = enable_cstr;
+    setEnable_cstrIsSet(true);
+    this.validate_cstr = validate_cstr;
+    setValidate_cstrIsSet(true);
+    this.rely_cstr = rely_cstr;
+    setRely_cstrIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public SQLDefaultConstraint(SQLDefaultConstraint other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetTable_db()) {
+      this.table_db = other.table_db;
+    }
+    if (other.isSetTable_name()) {
+      this.table_name = other.table_name;
+    }
+    if (other.isSetColumn_name()) {
+      this.column_name = other.column_name;
+    }
+    if (other.isSetDefault_value()) {
+      this.default_value = other.default_value;
+    }
+    if (other.isSetDc_name()) {
+      this.dc_name = other.dc_name;
+    }
+    this.enable_cstr = other.enable_cstr;
+    this.validate_cstr = other.validate_cstr;
+    this.rely_cstr = other.rely_cstr;
+  }
+
+  public SQLDefaultConstraint deepCopy() {
+    return new SQLDefaultConstraint(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catName = null;
+    this.table_db = null;
+    this.table_name = null;
+    this.column_name = null;
+    this.default_value = null;
+    this.dc_name = null;
+    setEnable_cstrIsSet(false);
+    this.enable_cstr = false;
+    setValidate_cstrIsSet(false);
+    this.validate_cstr = false;
+    setRely_cstrIsSet(false);
+    this.rely_cstr = false;
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getTable_db() {
+    return this.table_db;
+  }
+
+  public void setTable_db(String table_db) {
+    this.table_db = table_db;
+  }
+
+  public void unsetTable_db() {
+    this.table_db = null;
+  }
+
+  /** Returns true if field table_db is set (has been assigned a value) and false otherwise */
+  public boolean isSetTable_db() {
+    return this.table_db != null;
+  }
+
+  public void setTable_dbIsSet(boolean value) {
+    if (!value) {
+      this.table_db = null;
+    }
+  }
+
+  public String getTable_name() {
+    return this.table_name;
+  }
+
+  public void setTable_name(String table_name) {
+    this.table_name = table_name;
+  }
+
+  public void unsetTable_name() {
+    this.table_name = null;
+  }
+
+  /** Returns true if field table_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetTable_name() {
+    return this.table_name != null;
+  }
+
+  public void setTable_nameIsSet(boolean value) {
+    if (!value) {
+      this.table_name = null;
+    }
+  }
+
+  public String getColumn_name() {
+    return this.column_name;
+  }
+
+  public void setColumn_name(String column_name) {
+    this.column_name = column_name;
+  }
+
+  public void unsetColumn_name() {
+    this.column_name = null;
+  }
+
+  /** Returns true if field column_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetColumn_name() {
+    return this.column_name != null;
+  }
+
+  public void setColumn_nameIsSet(boolean value) {
+    if (!value) {
+      this.column_name = null;
+    }
+  }
+
+  public String getDefault_value() {
+    return this.default_value;
+  }
+
+  public void setDefault_value(String default_value) {
+    this.default_value = default_value;
+  }
+
+  public void unsetDefault_value() {
+    this.default_value = null;
+  }
+
+  /** Returns true if field default_value is set (has been assigned a value) and false otherwise */
+  public boolean isSetDefault_value() {
+    return this.default_value != null;
+  }
+
+  public void setDefault_valueIsSet(boolean value) {
+    if (!value) {
+      this.default_value = null;
+    }
+  }
+
+  public String getDc_name() {
+    return this.dc_name;
+  }
+
+  public void setDc_name(String dc_name) {
+    this.dc_name = dc_name;
+  }
+
+  public void unsetDc_name() {
+    this.dc_name = null;
+  }
+
+  /** Returns true if field dc_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetDc_name() {
+    return this.dc_name != null;
+  }
+
+  public void setDc_nameIsSet(boolean value) {
+    if (!value) {
+      this.dc_name = null;
+    }
+  }
+
+  public boolean isEnable_cstr() {
+    return this.enable_cstr;
+  }
+
+  public void setEnable_cstr(boolean enable_cstr) {
+    this.enable_cstr = enable_cstr;
+    setEnable_cstrIsSet(true);
+  }
+
+  public void unsetEnable_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field enable_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetEnable_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID);
+  }
+
+  public void setEnable_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID, value);
+  }
+
+  public boolean isValidate_cstr() {
+    return this.validate_cstr;
+  }
+
+  public void setValidate_cstr(boolean validate_cstr) {
+    this.validate_cstr = validate_cstr;
+    setValidate_cstrIsSet(true);
+  }
+
+  public void unsetValidate_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field validate_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidate_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID);
+  }
+
+  public void setValidate_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID, value);
+  }
+
+  public boolean isRely_cstr() {
+    return this.rely_cstr;
+  }
+
+  public void setRely_cstr(boolean rely_cstr) {
+    this.rely_cstr = rely_cstr;
+    setRely_cstrIsSet(true);
+  }
+
+  public void unsetRely_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RELY_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field rely_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetRely_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __RELY_CSTR_ISSET_ID);
+  }
+
+  public void setRely_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RELY_CSTR_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case TABLE_DB:
+      if (value == null) {
+        unsetTable_db();
+      } else {
+        setTable_db((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTable_name();
+      } else {
+        setTable_name((String)value);
+      }
+      break;
+
+    case COLUMN_NAME:
+      if (value == null) {
+        unsetColumn_name();
+      } else {
+        setColumn_name((String)value);
+      }
+      break;
+
+    case DEFAULT_VALUE:
+      if (value == null) {
+        unsetDefault_value();
+      } else {
+        setDefault_value((String)value);
+      }
+      break;
+
+    case DC_NAME:
+      if (value == null) {
+        unsetDc_name();
+      } else {
+        setDc_name((String)value);
+      }
+      break;
+
+    case ENABLE_CSTR:
+      if (value == null) {
+        unsetEnable_cstr();
+      } else {
+        setEnable_cstr((Boolean)value);
+      }
+      break;
+
+    case VALIDATE_CSTR:
+      if (value == null) {
+        unsetValidate_cstr();
+      } else {
+        setValidate_cstr((Boolean)value);
+      }
+      break;
+
+    case RELY_CSTR:
+      if (value == null) {
+        unsetRely_cstr();
+      } else {
+        setRely_cstr((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CAT_NAME:
+      return getCatName();
+
+    case TABLE_DB:
+      return getTable_db();
+
+    case TABLE_NAME:
+      return getTable_name();
+
+    case COLUMN_NAME:
+      return getColumn_name();
+
+    case DEFAULT_VALUE:
+      return getDefault_value();
+
+    case DC_NAME:
+      return getDc_name();
+
+    case ENABLE_CSTR:
+      return isEnable_cstr();
+
+    case VALIDATE_CSTR:
+      return isValidate_cstr();
+
+    case RELY_CSTR:
+      return isRely_cstr();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if the given field is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CAT_NAME:
+      return isSetCatName();
+    case TABLE_DB:
+      return isSetTable_db();
+    case TABLE_NAME:
+      return isSetTable_name();
+    case COLUMN_NAME:
+      return isSetColumn_name();
+    case DEFAULT_VALUE:
+      return isSetDefault_value();
+    case DC_NAME:
+      return isSetDc_name();
+    case ENABLE_CSTR:
+      return isSetEnable_cstr();
+    case VALIDATE_CSTR:
+      return isSetValidate_cstr();
+    case RELY_CSTR:
+      return isSetRely_cstr();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof SQLDefaultConstraint)
+      return this.equals((SQLDefaultConstraint)that);
+    return false;
+  }
+
+  public boolean equals(SQLDefaultConstraint that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_table_db = true && this.isSetTable_db();
+    boolean that_present_table_db = true && that.isSetTable_db();
+    if (this_present_table_db || that_present_table_db) {
+      if (!(this_present_table_db && that_present_table_db))
+        return false;
+      if (!this.table_db.equals(that.table_db))
+        return false;
+    }
+
+    boolean this_present_table_name = true && this.isSetTable_name();
+    boolean that_present_table_name = true && that.isSetTable_name();
+    if (this_present_table_name || that_present_table_name) {
+      if (!(this_present_table_name && that_present_table_name))
+        return false;
+      if (!this.table_name.equals(that.table_name))
+        return false;
+    }
+
+    boolean this_present_column_name = true && this.isSetColumn_name();
+    boolean that_present_column_name = true && that.isSetColumn_name();
+    if (this_present_column_name || that_present_column_name) {
+      if (!(this_present_column_name && that_present_column_name))
+        return false;
+      if (!this.column_name.equals(that.column_name))
+        return false;
+    }
+
+    boolean this_present_default_value = true && this.isSetDefault_value();
+    boolean that_present_default_value = true && that.isSetDefault_value();
+    if (this_present_default_value || that_present_default_value) {
+      if (!(this_present_default_value && that_present_default_value))
+        return false;
+      if (!this.default_value.equals(that.default_value))
+        return false;
+    }
+
+    boolean this_present_dc_name = true && this.isSetDc_name();
+    boolean that_present_dc_name = true && that.isSetDc_name();
+    if (this_present_dc_name || that_present_dc_name) {
+      if (!(this_present_dc_name && that_present_dc_name))
+        return false;
+      if (!this.dc_name.equals(that.dc_name))
+        return false;
+    }
+
+    boolean this_present_enable_cstr = true;
+    boolean that_present_enable_cstr = true;
+    if (this_present_enable_cstr || that_present_enable_cstr) {
+      if (!(this_present_enable_cstr && that_present_enable_cstr))
+        return false;
+      if (this.enable_cstr != that.enable_cstr)
+        return false;
+    }
+
+    boolean this_present_validate_cstr = true;
+    boolean that_present_validate_cstr = true;
+    if (this_present_validate_cstr || that_present_validate_cstr) {
+      if (!(this_present_validate_cstr && that_present_validate_cstr))
+        return false;
+      if (this.validate_cstr != that.validate_cstr)
+        return false;
+    }
+
+    boolean this_present_rely_cstr = true;
+    boolean that_present_rely_cstr = true;
+    if (this_present_rely_cstr || that_present_rely_cstr) {
+      if (!(this_present_rely_cstr && that_present_rely_cstr))
+        return false;
+      if (this.rely_cstr != that.rely_cstr)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_table_db = true && (isSetTable_db());
+    list.add(present_table_db);
+    if (present_table_db)
+      list.add(table_db);
+
+    boolean present_table_name = true && (isSetTable_name());
+    list.add(present_table_name);
+    if (present_table_name)
+      list.add(table_name);
+
+    boolean present_column_name = true && (isSetColumn_name());
+    list.add(present_column_name);
+    if (present_column_name)
+      list.add(column_name);
+
+    boolean present_default_value = true && (isSetDefault_value());
+    list.add(present_default_value);
+    if (present_default_value)
+      list.add(default_value);
+
+    boolean present_dc_name = true && (isSetDc_name());
+    list.add(present_dc_name);
+    if (present_dc_name)
+      list.add(dc_name);
+
+    boolean present_enable_cstr = true;
+    list.add(present_enable_cstr);
+    if (present_enable_cstr)
+      list.add(enable_cstr);
+
+    boolean present_validate_cstr = true;
+    list.add(present_validate_cstr);
+    if (present_validate_cstr)
+      list.add(validate_cstr);
+
+    boolean present_rely_cstr = true;
+    list.add(present_rely_cstr);
+    if (present_rely_cstr)
+      list.add(rely_cstr);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(SQLDefaultConstraint other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTable_db()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_db, other.table_db);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTable_name()).compareTo(other.isSetTable_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTable_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_name, other.table_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColumn_name()).compareTo(other.isSetColumn_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColumn_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_name, other.column_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDefault_value()).compareTo(other.isSetDefault_value());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDefault_value()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.default_value, other.default_value);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDc_name()).compareTo(other.isSetDc_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDc_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dc_name, other.dc_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetEnable_cstr()).compareTo(other.isSetEnable_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEnable_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.enable_cstr, other.enable_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidate_cstr()).compareTo(other.isSetValidate_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidate_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validate_cstr, other.validate_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRely_cstr()).compareTo(other.isSetRely_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRely_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rely_cstr, other.rely_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("SQLDefaultConstraint(");
+    boolean first = true;
+
+    sb.append("catName:");
+    if (this.catName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("table_db:");
+    if (this.table_db == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.table_db);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("table_name:");
+    if (this.table_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.table_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("column_name:");
+    if (this.column_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.column_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("default_value:");
+    if (this.default_value == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.default_value);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dc_name:");
+    if (this.dc_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dc_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("enable_cstr:");
+    sb.append(this.enable_cstr);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("validate_cstr:");
+    sb.append(this.validate_cstr);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("rely_cstr:");
+    sb.append(this.rely_cstr);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class SQLDefaultConstraintStandardSchemeFactory implements SchemeFactory {
+    public SQLDefaultConstraintStandardScheme getScheme() {
+      return new SQLDefaultConstraintStandardScheme();
+    }
+  }
+
+  private static class SQLDefaultConstraintStandardScheme extends StandardScheme<SQLDefaultConstraint> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SQLDefaultConstraint struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TABLE_DB
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.table_db = iprot.readString();
+              struct.setTable_dbIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.table_name = iprot.readString();
+              struct.setTable_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // COLUMN_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.column_name = iprot.readString();
+              struct.setColumn_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // DEFAULT_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.default_value = iprot.readString();
+              struct.setDefault_valueIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // DC_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dc_name = iprot.readString();
+              struct.setDc_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // ENABLE_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.enable_cstr = iprot.readBool();
+              struct.setEnable_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // VALIDATE_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.validate_cstr = iprot.readBool();
+              struct.setValidate_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // RELY_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.rely_cstr = iprot.readBool();
+              struct.setRely_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SQLDefaultConstraint struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catName != null) {
+        oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+        oprot.writeString(struct.catName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.table_db != null) {
+        oprot.writeFieldBegin(TABLE_DB_FIELD_DESC);
+        oprot.writeString(struct.table_db);
+        oprot.writeFieldEnd();
+      }
+      if (struct.table_name != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.table_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.column_name != null) {
+        oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC);
+        oprot.writeString(struct.column_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.default_value != null) {
+        oprot.writeFieldBegin(DEFAULT_VALUE_FIELD_DESC);
+        oprot.writeString(struct.default_value);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dc_name != null) {
+        oprot.writeFieldBegin(DC_NAME_FIELD_DESC);
+        oprot.writeString(struct.dc_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(ENABLE_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.enable_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(VALIDATE_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.validate_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(RELY_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.rely_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class SQLDefaultConstraintTupleSchemeFactory implements SchemeFactory {
+    public SQLDefaultConstraintTupleScheme getScheme() {
+      return new SQLDefaultConstraintTupleScheme();
+    }
+  }
+
+  private static class SQLDefaultConstraintTupleScheme extends TupleScheme<SQLDefaultConstraint> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstraint struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTable_db()) {
+        optionals.set(1);
+      }
+      if (struct.isSetTable_name()) {
+        optionals.set(2);
+      }
+      if (struct.isSetColumn_name()) {
+        optionals.set(3);
+      }
+      if (struct.isSetDefault_value()) {
+        optionals.set(4);
+      }
+      if (struct.isSetDc_name()) {
+        optionals.set(5);
+      }
+      if (struct.isSetEnable_cstr()) {
+        optionals.set(6);
+      }
+      if (struct.isSetValidate_cstr()) {
+        optionals.set(7);
+      }
+      if (struct.isSetRely_cstr()) {
+        optionals.set(8);
+      }
+      oprot.writeBitSet(optionals, 9);
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+      if (struct.isSetTable_db()) {
+        oprot.writeString(struct.table_db);
+      }
+      if (struct.isSetTable_name()) {
+        oprot.writeString(struct.table_name);
+      }
+      if (struct.isSetColumn_name()) {
+        oprot.writeString(struct.column_name);
+      }
+      if (struct.isSetDefault_value()) {
+        oprot.writeString(struct.default_value);
+      }
+      if (struct.isSetDc_name()) {
+        oprot.writeString(struct.dc_name);
+      }
+      if (struct.isSetEnable_cstr()) {
+        oprot.writeBool(struct.enable_cstr);
+      }
+      if (struct.isSetValidate_cstr()) {
+        oprot.writeBool(struct.validate_cstr);
+      }
+      if (struct.isSetRely_cstr()) {
+        oprot.writeBool(struct.rely_cstr);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, SQLDefaultConstraint struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(9);
+      if (incoming.get(0)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.table_db = iprot.readString();
+        struct.setTable_dbIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.table_name = iprot.readString();
+        struct.setTable_nameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.column_name = iprot.readString();
+        struct.setColumn_nameIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.default_value = iprot.readString();
+        struct.setDefault_valueIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.dc_name = iprot.readString();
+        struct.setDc_nameIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.enable_cstr = iprot.readBool();
+        struct.setEnable_cstrIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.validate_cstr = iprot.readBool();
+        struct.setValidate_cstrIsSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.rely_cstr = iprot.readBool();
+        struct.setRely_cstrIsSet(true);
+      }
+    }
+  }
+
+}
+
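The generated SQLDefaultConstraint above is a self-contained javabean, so it can be exercised without a metastore server. Below is a minimal round-trip sketch through TCompactProtocol, the same protocol the generated writeObject/readObject hooks use; it assumes only a libthrift 0.9.3 runtime on the classpath, the wrapper class is purely illustrative, and every field value in it (the "hive" catalog, the "orders" table, and so on) is a made-up placeholder rather than anything from this commit.

  import org.apache.thrift.TDeserializer;
  import org.apache.thrift.TException;
  import org.apache.thrift.TSerializer;
  import org.apache.thrift.protocol.TCompactProtocol;
  import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;

  public class SQLDefaultConstraintRoundTrip {
    public static void main(String[] args) throws TException {
      // All nine fields via the all-args constructor; the three boolean
      // flags are recorded in __isset_bitfield by the setXxxIsSet calls.
      SQLDefaultConstraint dc = new SQLDefaultConstraint(
          "hive",             // catName (placeholder)
          "default",          // table_db (placeholder)
          "orders",           // table_name (placeholder)
          "status",           // column_name (placeholder)
          "'NEW'",            // default_value (placeholder)
          "dc_orders_status", // dc_name (placeholder)
          true,               // enable_cstr
          false,              // validate_cstr
          true);              // rely_cstr

      // Serialize to bytes and back with the compact protocol.
      byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(dc);
      SQLDefaultConstraint copy = new SQLDefaultConstraint();
      new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

      System.out.println(copy);            // toString() prints all nine fields
      System.out.println(dc.equals(copy)); // true: field-by-field equality
    }
  }

Since every field in the metadata map is DEFAULT rather than REQUIRED, validate() is a no-op and any subset of assigned fields would round-trip the same way.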


[29/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRqst.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRqst.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRqst.java
new file mode 100644
index 0000000..d5230c9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FindSchemasByColsRqst.java
@@ -0,0 +1,605 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class FindSchemasByColsRqst implements org.apache.thrift.TBase<FindSchemasByColsRqst, FindSchemasByColsRqst._Fields>, java.io.Serializable, Cloneable, Comparable<FindSchemasByColsRqst> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FindSchemasByColsRqst");
+
+  private static final org.apache.thrift.protocol.TField COL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("colName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField COL_NAMESPACE_FIELD_DESC = new org.apache.thrift.protocol.TField("colNamespace", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FindSchemasByColsRqstStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FindSchemasByColsRqstTupleSchemeFactory());
+  }
+
+  private String colName; // optional
+  private String colNamespace; // optional
+  private String type; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    COL_NAME((short)1, "colName"),
+    COL_NAMESPACE((short)2, "colNamespace"),
+    TYPE((short)3, "type");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // COL_NAME
+          return COL_NAME;
+        case 2: // COL_NAMESPACE
+          return COL_NAMESPACE;
+        case 3: // TYPE
+          return TYPE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.COL_NAME,_Fields.COL_NAMESPACE,_Fields.TYPE};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.COL_NAME, new org.apache.thrift.meta_data.FieldMetaData("colName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COL_NAMESPACE, new org.apache.thrift.meta_data.FieldMetaData("colNamespace", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FindSchemasByColsRqst.class, metaDataMap);
+  }
+
+  public FindSchemasByColsRqst() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public FindSchemasByColsRqst(FindSchemasByColsRqst other) {
+    if (other.isSetColName()) {
+      this.colName = other.colName;
+    }
+    if (other.isSetColNamespace()) {
+      this.colNamespace = other.colNamespace;
+    }
+    if (other.isSetType()) {
+      this.type = other.type;
+    }
+  }
+
+  public FindSchemasByColsRqst deepCopy() {
+    return new FindSchemasByColsRqst(this);
+  }
+
+  @Override
+  public void clear() {
+    this.colName = null;
+    this.colNamespace = null;
+    this.type = null;
+  }
+
+  public String getColName() {
+    return this.colName;
+  }
+
+  public void setColName(String colName) {
+    this.colName = colName;
+  }
+
+  public void unsetColName() {
+    this.colName = null;
+  }
+
+  /** Returns true if field colName is set (has been assigned a value) and false otherwise */
+  public boolean isSetColName() {
+    return this.colName != null;
+  }
+
+  public void setColNameIsSet(boolean value) {
+    if (!value) {
+      this.colName = null;
+    }
+  }
+
+  public String getColNamespace() {
+    return this.colNamespace;
+  }
+
+  public void setColNamespace(String colNamespace) {
+    this.colNamespace = colNamespace;
+  }
+
+  public void unsetColNamespace() {
+    this.colNamespace = null;
+  }
+
+  /** Returns true if field colNamespace is set (has been assigned a value) and false otherwise */
+  public boolean isSetColNamespace() {
+    return this.colNamespace != null;
+  }
+
+  public void setColNamespaceIsSet(boolean value) {
+    if (!value) {
+      this.colNamespace = null;
+    }
+  }
+
+  public String getType() {
+    return this.type;
+  }
+
+  public void setType(String type) {
+    this.type = type;
+  }
+
+  public void unsetType() {
+    this.type = null;
+  }
+
+  /** Returns true if field type is set (has been assigned a value) and false otherwise */
+  public boolean isSetType() {
+    return this.type != null;
+  }
+
+  public void setTypeIsSet(boolean value) {
+    if (!value) {
+      this.type = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case COL_NAME:
+      if (value == null) {
+        unsetColName();
+      } else {
+        setColName((String)value);
+      }
+      break;
+
+    case COL_NAMESPACE:
+      if (value == null) {
+        unsetColNamespace();
+      } else {
+        setColNamespace((String)value);
+      }
+      break;
+
+    case TYPE:
+      if (value == null) {
+        unsetType();
+      } else {
+        setType((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case COL_NAME:
+      return getColName();
+
+    case COL_NAMESPACE:
+      return getColNamespace();
+
+    case TYPE:
+      return getType();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if the given field is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case COL_NAME:
+      return isSetColName();
+    case COL_NAMESPACE:
+      return isSetColNamespace();
+    case TYPE:
+      return isSetType();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof FindSchemasByColsRqst)
+      return this.equals((FindSchemasByColsRqst)that);
+    return false;
+  }
+
+  public boolean equals(FindSchemasByColsRqst that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_colName = true && this.isSetColName();
+    boolean that_present_colName = true && that.isSetColName();
+    if (this_present_colName || that_present_colName) {
+      if (!(this_present_colName && that_present_colName))
+        return false;
+      if (!this.colName.equals(that.colName))
+        return false;
+    }
+
+    boolean this_present_colNamespace = true && this.isSetColNamespace();
+    boolean that_present_colNamespace = true && that.isSetColNamespace();
+    if (this_present_colNamespace || that_present_colNamespace) {
+      if (!(this_present_colNamespace && that_present_colNamespace))
+        return false;
+      if (!this.colNamespace.equals(that.colNamespace))
+        return false;
+    }
+
+    boolean this_present_type = true && this.isSetType();
+    boolean that_present_type = true && that.isSetType();
+    if (this_present_type || that_present_type) {
+      if (!(this_present_type && that_present_type))
+        return false;
+      if (!this.type.equals(that.type))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_colName = true && (isSetColName());
+    list.add(present_colName);
+    if (present_colName)
+      list.add(colName);
+
+    boolean present_colNamespace = true && (isSetColNamespace());
+    list.add(present_colNamespace);
+    if (present_colNamespace)
+      list.add(colNamespace);
+
+    boolean present_type = true && (isSetType());
+    list.add(present_type);
+    if (present_type)
+      list.add(type);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(FindSchemasByColsRqst other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetColName()).compareTo(other.isSetColName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colName, other.colName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColNamespace()).compareTo(other.isSetColNamespace());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColNamespace()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colNamespace, other.colNamespace);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FindSchemasByColsRqst(");
+    boolean first = true;
+
+    if (isSetColName()) {
+      sb.append("colName:");
+      if (this.colName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.colName);
+      }
+      first = false;
+    }
+    if (isSetColNamespace()) {
+      if (!first) sb.append(", ");
+      sb.append("colNamespace:");
+      if (this.colNamespace == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.colNamespace);
+      }
+      first = false;
+    }
+    if (isSetType()) {
+      if (!first) sb.append(", ");
+      sb.append("type:");
+      if (this.type == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.type);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class FindSchemasByColsRqstStandardSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRqstStandardScheme getScheme() {
+      return new FindSchemasByColsRqstStandardScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRqstStandardScheme extends StandardScheme<FindSchemasByColsRqst> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, FindSchemasByColsRqst struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // COL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.colName = iprot.readString();
+              struct.setColNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // COL_NAMESPACE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.colNamespace = iprot.readString();
+              struct.setColNamespaceIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.type = iprot.readString();
+              struct.setTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, FindSchemasByColsRqst struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.colName != null) {
+        if (struct.isSetColName()) {
+          oprot.writeFieldBegin(COL_NAME_FIELD_DESC);
+          oprot.writeString(struct.colName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.colNamespace != null) {
+        if (struct.isSetColNamespace()) {
+          oprot.writeFieldBegin(COL_NAMESPACE_FIELD_DESC);
+          oprot.writeString(struct.colNamespace);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.type != null) {
+        if (struct.isSetType()) {
+          oprot.writeFieldBegin(TYPE_FIELD_DESC);
+          oprot.writeString(struct.type);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class FindSchemasByColsRqstTupleSchemeFactory implements SchemeFactory {
+    public FindSchemasByColsRqstTupleScheme getScheme() {
+      return new FindSchemasByColsRqstTupleScheme();
+    }
+  }
+
+  private static class FindSchemasByColsRqstTupleScheme extends TupleScheme<FindSchemasByColsRqst> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRqst struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetColName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetColNamespace()) {
+        optionals.set(1);
+      }
+      if (struct.isSetType()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetColName()) {
+        oprot.writeString(struct.colName);
+      }
+      if (struct.isSetColNamespace()) {
+        oprot.writeString(struct.colNamespace);
+      }
+      if (struct.isSetType()) {
+        oprot.writeString(struct.type);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, FindSchemasByColsRqst struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.colName = iprot.readString();
+        struct.setColNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.colNamespace = iprot.readString();
+        struct.setColNamespaceIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.type = iprot.readString();
+        struct.setTypeIsSet(true);
+      }
+    }
+  }
+
+}
+
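
The hunk above ends with the two serializer variants Thrift generates for every struct: a StandardScheme that writes tagged fields, and a TupleScheme that packs the optional fields behind a leading BitSet, selected at runtime through the protocol's getScheme() lookup in the schemes map. A minimal round-trip sketch follows, assuming the usual generated no-arg constructor and setColName(String) setter (neither appears in this hunk) and a hypothetical class name:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;

    import org.apache.hadoop.hive.metastore.api.FindSchemasByColsRqst;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TIOStreamTransport;

    public class FindSchemasRoundTrip {
      public static void main(String[] args) throws Exception {
        FindSchemasByColsRqst rqst = new FindSchemasByColsRqst();
        rqst.setColName("id"); // assumed generated setter; other fields stay unset

        // TCompactProtocol maps to the StandardScheme, which writes only the
        // optional fields that are actually set (see the write() above).
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        rqst.write(new TCompactProtocol(new TIOStreamTransport(bos)));

        FindSchemasByColsRqst copy = new FindSchemasByColsRqst();
        copy.read(new TCompactProtocol(new TIOStreamTransport(
            new ByteArrayInputStream(bos.toByteArray()))));
        // copy.isSetColName() is true; copy.isSetColNamespace() is false
      }
    }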

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
new file mode 100644
index 0000000..2560922
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java
@@ -0,0 +1,967 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class FireEventRequest implements org.apache.thrift.TBase<FireEventRequest, FireEventRequest._Fields>, java.io.Serializable, Cloneable, Comparable<FireEventRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FireEventRequest");
+
+  private static final org.apache.thrift.protocol.TField SUCCESSFUL_FIELD_DESC = new org.apache.thrift.protocol.TField("successful", org.apache.thrift.protocol.TType.BOOL, (short)1);
+  private static final org.apache.thrift.protocol.TField DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("data", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField PARTITION_VALS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionVals", org.apache.thrift.protocol.TType.LIST, (short)5);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FireEventRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FireEventRequestTupleSchemeFactory());
+  }
+
+  private boolean successful; // required
+  private FireEventRequestData data; // required
+  private String dbName; // optional
+  private String tableName; // optional
+  private List<String> partitionVals; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SUCCESSFUL((short)1, "successful"),
+    DATA((short)2, "data"),
+    DB_NAME((short)3, "dbName"),
+    TABLE_NAME((short)4, "tableName"),
+    PARTITION_VALS((short)5, "partitionVals"),
+    CAT_NAME((short)6, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SUCCESSFUL
+          return SUCCESSFUL;
+        case 2: // DATA
+          return DATA;
+        case 3: // DB_NAME
+          return DB_NAME;
+        case 4: // TABLE_NAME
+          return TABLE_NAME;
+        case 5: // PARTITION_VALS
+          return PARTITION_VALS;
+        case 6: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __SUCCESSFUL_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.PARTITION_VALS,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SUCCESSFUL, new org.apache.thrift.meta_data.FieldMetaData("successful", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.DATA, new org.apache.thrift.meta_data.FieldMetaData("data", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FireEventRequestData.class)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARTITION_VALS, new org.apache.thrift.meta_data.FieldMetaData("partitionVals", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FireEventRequest.class, metaDataMap);
+  }
+
+  public FireEventRequest() {
+  }
+
+  public FireEventRequest(
+    boolean successful,
+    FireEventRequestData data)
+  {
+    this();
+    this.successful = successful;
+    setSuccessfulIsSet(true);
+    this.data = data;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public FireEventRequest(FireEventRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.successful = other.successful;
+    if (other.isSetData()) {
+      this.data = new FireEventRequestData(other.data);
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTableName()) {
+      this.tableName = other.tableName;
+    }
+    if (other.isSetPartitionVals()) {
+      List<String> __this__partitionVals = new ArrayList<String>(other.partitionVals);
+      this.partitionVals = __this__partitionVals;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public FireEventRequest deepCopy() {
+    return new FireEventRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setSuccessfulIsSet(false);
+    this.successful = false;
+    this.data = null;
+    this.dbName = null;
+    this.tableName = null;
+    this.partitionVals = null;
+    this.catName = null;
+  }
+
+  public boolean isSuccessful() {
+    return this.successful;
+  }
+
+  public void setSuccessful(boolean successful) {
+    this.successful = successful;
+    setSuccessfulIsSet(true);
+  }
+
+  public void unsetSuccessful() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESSFUL_ISSET_ID);
+  }
+
+  /** Returns true if field successful is set (has been assigned a value) and false otherwise */
+  public boolean isSetSuccessful() {
+    return EncodingUtils.testBit(__isset_bitfield, __SUCCESSFUL_ISSET_ID);
+  }
+
+  public void setSuccessfulIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESSFUL_ISSET_ID, value);
+  }
+
+  public FireEventRequestData getData() {
+    return this.data;
+  }
+
+  public void setData(FireEventRequestData data) {
+    this.data = data;
+  }
+
+  public void unsetData() {
+    this.data = null;
+  }
+
+  /** Returns true if field data is set (has been assigned a value) and false otherwise */
+  public boolean isSetData() {
+    return this.data != null;
+  }
+
+  public void setDataIsSet(boolean value) {
+    if (!value) {
+      this.data = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  public int getPartitionValsSize() {
+    return (this.partitionVals == null) ? 0 : this.partitionVals.size();
+  }
+
+  public java.util.Iterator<String> getPartitionValsIterator() {
+    return (this.partitionVals == null) ? null : this.partitionVals.iterator();
+  }
+
+  public void addToPartitionVals(String elem) {
+    if (this.partitionVals == null) {
+      this.partitionVals = new ArrayList<String>();
+    }
+    this.partitionVals.add(elem);
+  }
+
+  public List<String> getPartitionVals() {
+    return this.partitionVals;
+  }
+
+  public void setPartitionVals(List<String> partitionVals) {
+    this.partitionVals = partitionVals;
+  }
+
+  public void unsetPartitionVals() {
+    this.partitionVals = null;
+  }
+
+  /** Returns true if field partitionVals is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitionVals() {
+    return this.partitionVals != null;
+  }
+
+  public void setPartitionValsIsSet(boolean value) {
+    if (!value) {
+      this.partitionVals = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SUCCESSFUL:
+      if (value == null) {
+        unsetSuccessful();
+      } else {
+        setSuccessful((Boolean)value);
+      }
+      break;
+
+    case DATA:
+      if (value == null) {
+        unsetData();
+      } else {
+        setData((FireEventRequestData)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case PARTITION_VALS:
+      if (value == null) {
+        unsetPartitionVals();
+      } else {
+        setPartitionVals((List<String>)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SUCCESSFUL:
+      return isSuccessful();
+
+    case DATA:
+      return getData();
+
+    case DB_NAME:
+      return getDbName();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case PARTITION_VALS:
+      return getPartitionVals();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SUCCESSFUL:
+      return isSetSuccessful();
+    case DATA:
+      return isSetData();
+    case DB_NAME:
+      return isSetDbName();
+    case TABLE_NAME:
+      return isSetTableName();
+    case PARTITION_VALS:
+      return isSetPartitionVals();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof FireEventRequest)
+      return this.equals((FireEventRequest)that);
+    return false;
+  }
+
+  public boolean equals(FireEventRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_successful = true;
+    boolean that_present_successful = true;
+    if (this_present_successful || that_present_successful) {
+      if (!(this_present_successful && that_present_successful))
+        return false;
+      if (this.successful != that.successful)
+        return false;
+    }
+
+    boolean this_present_data = true && this.isSetData();
+    boolean that_present_data = true && that.isSetData();
+    if (this_present_data || that_present_data) {
+      if (!(this_present_data && that_present_data))
+        return false;
+      if (!this.data.equals(that.data))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_partitionVals = true && this.isSetPartitionVals();
+    boolean that_present_partitionVals = true && that.isSetPartitionVals();
+    if (this_present_partitionVals || that_present_partitionVals) {
+      if (!(this_present_partitionVals && that_present_partitionVals))
+        return false;
+      if (!this.partitionVals.equals(that.partitionVals))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_successful = true;
+    list.add(present_successful);
+    if (present_successful)
+      list.add(successful);
+
+    boolean present_data = true && (isSetData());
+    list.add(present_data);
+    if (present_data)
+      list.add(data);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tableName = true && (isSetTableName());
+    list.add(present_tableName);
+    if (present_tableName)
+      list.add(tableName);
+
+    boolean present_partitionVals = true && (isSetPartitionVals());
+    list.add(present_partitionVals);
+    if (present_partitionVals)
+      list.add(partitionVals);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(FireEventRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSuccessful()).compareTo(other.isSetSuccessful());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSuccessful()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.successful, other.successful);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetData()).compareTo(other.isSetData());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetData()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.data, other.data);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartitionVals()).compareTo(other.isSetPartitionVals());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitionVals()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionVals, other.partitionVals);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FireEventRequest(");
+    boolean first = true;
+
+    sb.append("successful:");
+    sb.append(this.successful);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("data:");
+    if (this.data == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.data);
+    }
+    first = false;
+    if (isSetDbName()) {
+      if (!first) sb.append(", ");
+      sb.append("dbName:");
+      if (this.dbName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.dbName);
+      }
+      first = false;
+    }
+    if (isSetTableName()) {
+      if (!first) sb.append(", ");
+      sb.append("tableName:");
+      if (this.tableName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.tableName);
+      }
+      first = false;
+    }
+    if (isSetPartitionVals()) {
+      if (!first) sb.append(", ");
+      sb.append("partitionVals:");
+      if (this.partitionVals == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partitionVals);
+      }
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetSuccessful()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'successful' is unset! Struct:" + toString());
+    }
+
+    if (!isSetData()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'data' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java deserialization bypasses the default constructor, so the isset bitfield must be reset explicitly before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class FireEventRequestStandardSchemeFactory implements SchemeFactory {
+    public FireEventRequestStandardScheme getScheme() {
+      return new FireEventRequestStandardScheme();
+    }
+  }
+
+  private static class FireEventRequestStandardScheme extends StandardScheme<FireEventRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SUCCESSFUL
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.successful = iprot.readBool();
+              struct.setSuccessfulIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DATA
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.data = new FireEventRequestData();
+              struct.data.read(iprot);
+              struct.setDataIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = iprot.readString();
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // PARTITION_VALS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list756 = iprot.readListBegin();
+                struct.partitionVals = new ArrayList<String>(_list756.size);
+                String _elem757;
+                for (int _i758 = 0; _i758 < _list756.size; ++_i758)
+                {
+                  _elem757 = iprot.readString();
+                  struct.partitionVals.add(_elem757);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionValsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(SUCCESSFUL_FIELD_DESC);
+      oprot.writeBool(struct.successful);
+      oprot.writeFieldEnd();
+      if (struct.data != null) {
+        oprot.writeFieldBegin(DATA_FIELD_DESC);
+        struct.data.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        if (struct.isSetDbName()) {
+          oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+          oprot.writeString(struct.dbName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.tableName != null) {
+        if (struct.isSetTableName()) {
+          oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+          oprot.writeString(struct.tableName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.partitionVals != null) {
+        if (struct.isSetPartitionVals()) {
+          oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size()));
+            for (String _iter759 : struct.partitionVals)
+            {
+              oprot.writeString(_iter759);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class FireEventRequestTupleSchemeFactory implements SchemeFactory {
+    public FireEventRequestTupleScheme getScheme() {
+      return new FireEventRequestTupleScheme();
+    }
+  }
+
+  private static class FireEventRequestTupleScheme extends TupleScheme<FireEventRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeBool(struct.successful);
+      struct.data.write(oprot);
+      BitSet optionals = new BitSet();
+      if (struct.isSetDbName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTableName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetPartitionVals()) {
+        optionals.set(2);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(3);
+      }
+      oprot.writeBitSet(optionals, 4);
+      if (struct.isSetDbName()) {
+        oprot.writeString(struct.dbName);
+      }
+      if (struct.isSetTableName()) {
+        oprot.writeString(struct.tableName);
+      }
+      if (struct.isSetPartitionVals()) {
+        {
+          oprot.writeI32(struct.partitionVals.size());
+          for (String _iter760 : struct.partitionVals)
+          {
+            oprot.writeString(_iter760);
+          }
+        }
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.successful = iprot.readBool();
+      struct.setSuccessfulIsSet(true);
+      struct.data = new FireEventRequestData();
+      struct.data.read(iprot);
+      struct.setDataIsSet(true);
+      BitSet incoming = iprot.readBitSet(4);
+      if (incoming.get(0)) {
+        struct.dbName = iprot.readString();
+        struct.setDbNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.tableName = iprot.readString();
+        struct.setTableNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        {
+          org.apache.thrift.protocol.TList _list761 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.partitionVals = new ArrayList<String>(_list761.size);
+          String _elem762;
+          for (int _i763 = 0; _i763 < _list761.size; ++_i763)
+          {
+            _elem762 = iprot.readString();
+            struct.partitionVals.add(_elem762);
+          }
+        }
+        struct.setPartitionValsIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
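
Everything needed to build one of these requests is visible above: the two required fields arrive through the constructor, and the four optional ones through plain setters. A short sketch under those shapes; the class name and the database, table, partition, and catalog values are placeholders, and the InsertEventRequestData payload (defined in a separate generated file) is left at its defaults:

    import org.apache.hadoop.hive.metastore.api.FireEventRequest;
    import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
    import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;

    public class FireEventExample {
      public static FireEventRequest buildInsertEvent() throws org.apache.thrift.TException {
        InsertEventRequestData insertData = new InsertEventRequestData();
        FireEventRequest rqst = new FireEventRequest(
            true,                                         // successful (required)
            FireEventRequestData.insertData(insertData)); // data (required union)
        rqst.setDbName("web");                 // optional, placeholder name
        rqst.setTableName("clicks");           // optional, placeholder name
        rqst.addToPartitionVals("2018-07-12"); // optional, repeatable
        rqst.setCatName("hive");               // optional catalog name
        rqst.validate(); // TProtocolException if a required field were unset
        return rqst;
      }
    }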

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequestData.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequestData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequestData.java
new file mode 100644
index 0000000..a0da34b
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequestData.java
@@ -0,0 +1,309 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class FireEventRequestData extends org.apache.thrift.TUnion<FireEventRequestData, FireEventRequestData._Fields> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FireEventRequestData");
+  private static final org.apache.thrift.protocol.TField INSERT_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("insertData", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    INSERT_DATA((short)1, "insertData");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // INSERT_DATA
+          return INSERT_DATA;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.INSERT_DATA, new org.apache.thrift.meta_data.FieldMetaData("insertData", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, InsertEventRequestData.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FireEventRequestData.class, metaDataMap);
+  }
+
+  public FireEventRequestData() {
+    super();
+  }
+
+  public FireEventRequestData(_Fields setField, Object value) {
+    super(setField, value);
+  }
+
+  public FireEventRequestData(FireEventRequestData other) {
+    super(other);
+  }
+  public FireEventRequestData deepCopy() {
+    return new FireEventRequestData(this);
+  }
+
+  public static FireEventRequestData insertData(InsertEventRequestData value) {
+    FireEventRequestData x = new FireEventRequestData();
+    x.setInsertData(value);
+    return x;
+  }
+
+
+  @Override
+  protected void checkType(_Fields setField, Object value) throws ClassCastException {
+    switch (setField) {
+      case INSERT_DATA:
+        if (value instanceof InsertEventRequestData) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type InsertEventRequestData for field 'insertData', but got " + value.getClass().getSimpleName());
+      default:
+        throw new IllegalArgumentException("Unknown field id " + setField);
+    }
+  }
+
+  @Override
+  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException {
+    _Fields setField = _Fields.findByThriftId(field.id);
+    if (setField != null) {
+      switch (setField) {
+        case INSERT_DATA:
+          if (field.type == INSERT_DATA_FIELD_DESC.type) {
+            InsertEventRequestData insertData;
+            insertData = new InsertEventRequestData();
+            insertData.read(iprot);
+            return insertData;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        default:
+          throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
+      }
+    } else {
+      org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+      return null;
+    }
+  }
+
+  @Override
+  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    switch (setField_) {
+      case INSERT_DATA:
+        InsertEventRequestData insertData = (InsertEventRequestData)value_;
+        insertData.write(oprot);
+        return;
+      default:
+        throw new IllegalStateException("Cannot write union with unknown field " + setField_);
+    }
+  }
+
+  @Override
+  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException {
+    _Fields setField = _Fields.findByThriftId(fieldID);
+    if (setField != null) {
+      switch (setField) {
+        case INSERT_DATA:
+          InsertEventRequestData insertData;
+          insertData = new InsertEventRequestData();
+          insertData.read(iprot);
+          return insertData;
+        default:
+          throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
+      }
+    } else {
+      throw new TProtocolException("Couldn't find a field with field id " + fieldID);
+    }
+  }
+
+  @Override
+  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    switch (setField_) {
+      case INSERT_DATA:
+        InsertEventRequestData insertData = (InsertEventRequestData)value_;
+        insertData.write(oprot);
+        return;
+      default:
+        throw new IllegalStateException("Cannot write union with unknown field " + setField_);
+    }
+  }
+
+  @Override
+  protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) {
+    switch (setField) {
+      case INSERT_DATA:
+        return INSERT_DATA_FIELD_DESC;
+      default:
+        throw new IllegalArgumentException("Unknown field id " + setField);
+    }
+  }
+
+  @Override
+  protected org.apache.thrift.protocol.TStruct getStructDesc() {
+    return STRUCT_DESC;
+  }
+
+  @Override
+  protected _Fields enumForId(short id) {
+    return _Fields.findByThriftIdOrThrow(id);
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+
+  public InsertEventRequestData getInsertData() {
+    if (getSetField() == _Fields.INSERT_DATA) {
+      return (InsertEventRequestData)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'insertData' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setInsertData(InsertEventRequestData value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.INSERT_DATA;
+    value_ = value;
+  }
+
+  public boolean isSetInsertData() {
+    return setField_ == _Fields.INSERT_DATA;
+  }
+
+
+  public boolean equals(Object other) {
+    if (other instanceof FireEventRequestData) {
+      return equals((FireEventRequestData)other);
+    } else {
+      return false;
+    }
+  }
+
+  public boolean equals(FireEventRequestData other) {
+    return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue());
+  }
+
+  @Override
+  public int compareTo(FireEventRequestData other) {
+    int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField());
+    if (lastComparison == 0) {
+      return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue());
+    }
+    return lastComparison;
+  }
+
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+    list.add(this.getClass().getName());
+    org.apache.thrift.TFieldIdEnum setField = getSetField();
+    if (setField != null) {
+      list.add(setField.getThriftFieldId());
+      Object value = getFieldValue();
+      if (value instanceof org.apache.thrift.TEnum) {
+        list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue());
+      } else {
+        list.add(value);
+      }
+    }
+    return list.hashCode();
+  }
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+
+}
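
Unlike the plain structs around it, FireEventRequestData extends TUnion, so at most one field is set at a time and the typed accessors enforce that. A brief sketch of the discipline (hypothetical class name):

    import org.apache.hadoop.hive.metastore.api.FireEventRequestData;
    import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;

    public class UnionExample {
      public static void main(String[] args) {
        FireEventRequestData data = new FireEventRequestData();
        // Calling data.getInsertData() here would throw: no field is set yet.
        data.setInsertData(new InsertEventRequestData()); // null raises NullPointerException
        assert data.isSetInsertData();
        InsertEventRequestData payload = data.getInsertData(); // safe once set
      }
    }

The static factory above, FireEventRequestData.insertData(value), performs the same construct-then-set steps in one call.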

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
new file mode 100644
index 0000000..9125d86
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class FireEventResponse implements org.apache.thrift.TBase<FireEventResponse, FireEventResponse._Fields>, java.io.Serializable, Cloneable, Comparable<FireEventResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("FireEventResponse");
+
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FireEventResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FireEventResponseTupleSchemeFactory());
+  }
+
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(FireEventResponse.class, metaDataMap);
+  }
+
+  public FireEventResponse() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public FireEventResponse(FireEventResponse other) {
+  }
+
+  public FireEventResponse deepCopy() {
+    return new FireEventResponse(this);
+  }
+
+  @Override
+  public void clear() {
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof FireEventResponse)
+      return this.equals((FireEventResponse)that);
+    return false;
+  }
+
+  public boolean equals(FireEventResponse that) {
+    if (that == null)
+      return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(FireEventResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("FireEventResponse(");
+    boolean first = true;
+
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class FireEventResponseStandardSchemeFactory implements SchemeFactory {
+    public FireEventResponseStandardScheme getScheme() {
+      return new FireEventResponseStandardScheme();
+    }
+  }
+
+  private static class FireEventResponseStandardScheme extends StandardScheme<FireEventResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class FireEventResponseTupleSchemeFactory implements SchemeFactory {
+    public FireEventResponseTupleScheme getScheme() {
+      return new FireEventResponseTupleScheme();
+    }
+  }
+
+  private static class FireEventResponseTupleScheme extends TupleScheme<FireEventResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, FireEventResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, FireEventResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+    }
+  }
+
+}
+
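
FireEventResponse is generated from a fieldless struct: equals(FireEventResponse) degenerates to a null check, and the serializers write nothing but a field stop. Any two instances are therefore interchangeable acknowledgements, as this fragment illustrates:

    FireEventResponse a = new FireEventResponse();
    FireEventResponse b = new FireEventResponse();
    // a.equals(b) is always true and both hash to the same value,
    // since there are no fields left to differ.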


[53/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index 0000000,62ed380..7cf5c26
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@@ -1,0 -1,1075 +1,1075 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.cache;
+ 
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.HashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.concurrent.Callable;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.ThreadFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;
+ import org.apache.hadoop.hive.metastore.HiveMetaStore;
+ import org.apache.hadoop.hive.metastore.MetaStoreTestUtils;
+ import org.apache.hadoop.hive.metastore.ObjectStore;
+ import org.apache.hadoop.hive.metastore.TableType;
+ import org.apache.hadoop.hive.metastore.Warehouse;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
+ import org.apache.hadoop.hive.metastore.api.AggrStats;
+ import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
+ import org.apache.hadoop.hive.metastore.columnstats.cache.StringColumnStatsDataInspector;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.junit.Assert;
+ import org.junit.Before;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ @Category(MetastoreCheckinTest.class)
+ public class TestCachedStore {
+ 
+   private ObjectStore objectStore;
+   private CachedStore cachedStore;
+   private SharedCache sharedCache;
+   private Configuration conf;
+ 
+   @Before
+   public void setUp() throws Exception {
+     conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+     // Disable memory estimation for this test class
+     MetastoreConf.setVar(conf, MetastoreConf.ConfVars.CACHED_RAW_STORE_MAX_CACHE_MEMORY, "-1Kb");
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     objectStore = new ObjectStore();
+     objectStore.setConf(conf);
+     cachedStore = new CachedStore();
+     cachedStore.setConfForTest(conf);
+     // Stop the CachedStore cache update service. We'll start it explicitly to control the test
+     CachedStore.stopCacheUpdateService(1);
+     sharedCache = new SharedCache();
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+ 
+     // Create the 'hive' catalog
+     HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
+   }
+ 
+   /**********************************************************************************************
+    * Methods that test CachedStore
+    *********************************************************************************************/
+ 
+   @Test
+   public void testDatabaseOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testDatabaseOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read database via CachedStore
+     Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+ 
+     // Add another db via CachedStore
+     final String dbName1 = "testDatabaseOps1";
+     Database db1 = createTestDb(dbName1, dbOwner);
+     cachedStore.createDatabase(db1);
+     db1 = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
+ 
+     // Read db via ObjectStore
+     dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
+     Assert.assertEquals(db1, dbRead);
+ 
+     // Alter the db via CachedStore (can only alter owner or parameters)
+     dbOwner = "user2";
+     db = new Database(db);
+     db.setOwnerName(dbOwner);
+     cachedStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db);
+     db = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Read db via ObjectStore
+     dbRead = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+ 
+     // Add another db via ObjectStore
+     final String dbName2 = "testDatabaseOps2";
+     Database db2 = createTestDb(dbName2, dbOwner);
+     objectStore.createDatabase(db2);
+     db2 = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2);
+ 
+     // Alter db "testDatabaseOps" via ObjectStore
+     dbOwner = "user1";
+     db = new Database(db);
+     db.setOwnerName(dbOwner);
+     objectStore.alterDatabase(DEFAULT_CATALOG_NAME, dbName, db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Drop db "testDatabaseOps1" via ObjectStore
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName1);
+ 
+     // We update twice to reliably detect whether the cache is dirty
+     updateCache(cachedStore);
+     updateCache(cachedStore);
+ 
+     // Read the newly added db via CachedStore
+     dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName2);
+     Assert.assertEquals(db2, dbRead);
+ 
+     // Read the altered db via CachedStore (altered user from "user2" to "user1")
+     dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+ 
+     // Try to read the dropped db after cache update
+     try {
+       dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName1);
+       Assert.fail("The database: " + dbName1
+           + " should have been removed from the cache after running the update service");
+     } catch (NoSuchObjectException e) {
+       // Expected
+     }
+ 
+     // Clean up
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName2);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   @Test
+   public void testTableOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testTableOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Add a table via ObjectStore
+     String tblName = "tbl";
+     String tblOwner = "user1";
+     FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     List<FieldSchema> cols = new ArrayList<FieldSchema>();
+     cols.add(col1);
+     cols.add(col2);
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+     objectStore.createTable(tbl);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read database, table via CachedStore
+     Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+     Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     Assert.assertEquals(tbl, tblRead);
+ 
+     // Add a new table via CachedStore
+     String tblName1 = "tbl1";
+     Table tbl1 = new Table(tbl);
+     tbl1.setTableName(tblName1);
+     cachedStore.createTable(tbl1);
+     tbl1 = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+ 
+     // Read via object store
+     tblRead = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+     Assert.assertEquals(tbl1, tblRead);
+ 
+     // Add a new table via ObjectStore
+     String tblName2 = "tbl2";
+     Table tbl2 = new Table(tbl);
+     tbl2.setTableName(tblName2);
+     objectStore.createTable(tbl2);
+     tbl2 = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+ 
+     // Alter table "tbl" via ObjectStore
+     tblOwner = "role1";
+     tbl.setOwner(tblOwner);
+     tbl.setOwnerType(PrincipalType.ROLE);
 -    objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl);
++    objectStore.alterTable(DEFAULT_CATALOG_NAME, dbName, tblName, tbl, -1, null);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     Assert.assertEquals("Owner of the table did not change.", tblOwner, tbl.getOwner());
+     Assert.assertEquals("Owner type of the table did not change", PrincipalType.ROLE, tbl.getOwnerType());
+ 
+     // Drop table "tbl1" via ObjectStore
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+ 
+     // We update twice to reliably detect whether the cache is dirty
+     updateCache(cachedStore);
+     updateCache(cachedStore);
+ 
+     // Read "tbl2" via CachedStore
+     tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+     Assert.assertEquals(tbl2, tblRead);
+ 
+     // Read the altered "tbl" via CachedStore
+     tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     Assert.assertEquals(tbl, tblRead);
+ 
+     // Try to read the dropped "tbl1" via CachedStore (should return null)
+     tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName1);
+     Assert.assertNull(tblRead);
+ 
+     // Should return "tbl" and "tbl2"
+     List<String> tblNames = cachedStore.getTables(DEFAULT_CATALOG_NAME, dbName, "*");
+     Assert.assertTrue(tblNames.contains(tblName));
+     Assert.assertFalse(tblNames.contains(tblName1));
+     Assert.assertTrue(tblNames.contains(tblName2));
+ 
+     // Clean up
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName2);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   @Test
+   public void testPartitionOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testPartitionOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Add a table via ObjectStore
+     String tblName = "tbl";
+     String tblOwner = "user1";
+     FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     List<FieldSchema> cols = new ArrayList<FieldSchema>();
+     cols.add(col1);
+     cols.add(col2);
+     FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     ptnCols.add(ptnCol1);
+     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+     objectStore.createTable(tbl);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     final String ptnColVal1 = "aaa";
+     Map<String, String> partParams = new HashMap<String, String>();
+     Partition ptn1 =
+         new Partition(Arrays.asList(ptnColVal1), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(ptn1);
+     ptn1 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     final String ptnColVal2 = "bbb";
+     Partition ptn2 =
+         new Partition(Arrays.asList(ptnColVal2), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(ptn2);
+     ptn2 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+ 
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read database, table, partition via CachedStore
+     Database dbRead = cachedStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+     Assert.assertEquals(db, dbRead);
+     Table tblRead = cachedStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     Assert.assertEquals(tbl, tblRead);
+     Partition ptn1Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1));
+     Assert.assertEquals(ptn1, ptn1Read);
+     Partition ptn2Read = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+     Assert.assertEquals(ptn2, ptn2Read);
+ 
+     // Add a new partition via ObjectStore
+     final String ptnColVal3 = "ccc";
+     Partition ptn3 =
+         new Partition(Arrays.asList(ptnColVal3), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn3.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(ptn3);
+     ptn3 = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+ 
+     // Alter an existing partition ("aaa") via ObjectStore
+     final String ptnColVal1Alt = "aaaAlt";
+     Partition ptn1Alt =
+         new Partition(Arrays.asList(ptnColVal1Alt), dbName, tblName, 0, 0, tbl.getSd(), partParams);
+     ptn1Alt.setCatName(DEFAULT_CATALOG_NAME);
 -    objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Alt);
++    objectStore.alterPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1), ptn1Alt, -1, null);
+     ptn1Alt = objectStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+ 
+     // Drop an existing partition ("bbb") via ObjectStore
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+ 
+     // We update twice to reliably detect whether the cache is dirty
+     updateCache(cachedStore);
+     updateCache(cachedStore);
+ 
+     // Read the newly added partition via CachedStore
+     Partition ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+     Assert.assertEquals(ptn3, ptnRead);
+ 
+     // Read the altered partition via CachedStore
+     ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+     Assert.assertEquals(ptn1Alt, ptnRead);
+ 
+     // Try to read the dropped partition via CachedStore
+     try {
+       ptnRead = cachedStore.getPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal2));
+       Assert.fail("The partition: " + ptnColVal2
+           + " should have been removed from the cache after running the update service");
+     } catch (NoSuchObjectException e) {
+       // Expected
+     }
+     // Clean up
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal1Alt));
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, dbName, tblName, Arrays.asList(ptnColVal3));
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   //@Test
+   public void testTableColStatsOps() throws Exception {
+     // Add a db via ObjectStore
+     String dbName = "testTableColStatsOps";
+     String dbOwner = "user1";
+     Database db = createTestDb(dbName, dbOwner);
+     objectStore.createDatabase(db);
+     db = objectStore.getDatabase(DEFAULT_CATALOG_NAME, dbName);
+ 
+     // Add a table via ObjectStore
+     final String tblName = "tbl";
+     final String tblOwner = "user1";
+     final FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     // Stats values for col1
+     long col1LowVal = 5;
+     long col1HighVal = 500;
+     long col1Nulls = 10;
+     long col1DV = 20;
+     final FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     // Stats values for col2
+     long col2MaxColLen = 100;
+     double col2AvgColLen = 45.5;
+     long col2Nulls = 5;
+     long col2DV = 40;
+     final FieldSchema col3 = new FieldSchema("col3", "boolean", "boolean column");
+     // Stats values for col3
+     long col3NumTrues = 100;
+     long col3NumFalses = 30;
+     long col3Nulls = 10;
+     final List<FieldSchema> cols = new ArrayList<>();
+     cols.add(col1);
+     cols.add(col2);
+     cols.add(col3);
+     FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     ptnCols.add(ptnCol1);
+     Table tbl = createTestTbl(dbName, tblName, tblOwner, cols, ptnCols);
+     objectStore.createTable(tbl);
+     tbl = objectStore.getTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+ 
+     // Add ColumnStatistics for tbl to metastore DB via ObjectStore
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     // Col1
+     ColumnStatisticsData data1 = new ColumnStatisticsData();
+     ColumnStatisticsObj col1Stats = new ColumnStatisticsObj(col1.getName(), col1.getType(), data1);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(col1LowVal);
+     longStats.setHighValue(col1HighVal);
+     longStats.setNumNulls(col1Nulls);
+     longStats.setNumDVs(col1DV);
+     data1.setLongStats(longStats);
+     colStatObjs.add(col1Stats);
+ 
+     // Col2
+     ColumnStatisticsData data2 = new ColumnStatisticsData();
+     ColumnStatisticsObj col2Stats = new ColumnStatisticsObj(col2.getName(), col2.getType(), data2);
+     StringColumnStatsDataInspector stringStats = new StringColumnStatsDataInspector();
+     stringStats.setMaxColLen(col2MaxColLen);
+     stringStats.setAvgColLen(col2AvgColLen);
+     stringStats.setNumNulls(col2Nulls);
+     stringStats.setNumDVs(col2DV);
+     data2.setStringStats(stringStats);
+     colStatObjs.add(col2Stats);
+ 
+     // Col3
+     ColumnStatisticsData data3 = new ColumnStatisticsData();
+     ColumnStatisticsObj col3Stats = new ColumnStatisticsObj(col3.getName(), col3.getType(), data3);
+     BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
+     boolStats.setNumTrues(col3NumTrues);
+     boolStats.setNumFalses(col3NumFalses);
+     boolStats.setNumNulls(col3Nulls);
+     data3.setBooleanStats(boolStats);
+     colStatObjs.add(col3Stats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
+     // Save to DB
+     objectStore.updateTableColumnStatistics(stats);
+ 
+     // Prewarm CachedStore
+     CachedStore.setCachePrewarmedState(false);
+     CachedStore.prewarm(objectStore);
+ 
+     // Read table stats via CachedStore
+     ColumnStatistics newStats =
+         cachedStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, dbName, tblName,
+             Arrays.asList(col1.getName(), col2.getName(), col3.getName()));
+     Assert.assertEquals(stats, newStats);
+ 
+     // Clean up
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, dbName, tblName);
+     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, dbName);
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   /**********************************************************************************************
+    * Methods that test SharedCache
+    *********************************************************************************************/
+ 
+   @Test
+   public void testSharedStoreDb() {
+     Database db1 = createTestDb("db1", "user1");
+     Database db2 = createTestDb("db2", "user1");
+     Database db3 = createTestDb("db3", "user1");
+     Database newDb1 = createTestDb("newdb1", "user1");
+     sharedCache.addDatabaseToCache(db1);
+     sharedCache.addDatabaseToCache(db2);
+     sharedCache.addDatabaseToCache(db3);
+     Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3);
+     sharedCache.alterDatabaseInCache(DEFAULT_CATALOG_NAME, "db1", newDb1);
+     Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 3);
+     sharedCache.removeDatabaseFromCache(DEFAULT_CATALOG_NAME, "db2");
+     Assert.assertEquals(sharedCache.getCachedDatabaseCount(), 2);
+     List<String> dbs = sharedCache.listCachedDatabases(DEFAULT_CATALOG_NAME);
+     Assert.assertEquals(dbs.size(), 2);
+     Assert.assertTrue(dbs.contains("newdb1"));
+     Assert.assertTrue(dbs.contains("db3"));
+   }
+ 
+   @Test
+   public void testSharedStoreTable() {
+     Table tbl1 = new Table();
+     StorageDescriptor sd1 = new StorageDescriptor();
+     List<FieldSchema> cols1 = new ArrayList<>();
+     cols1.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params1 = new HashMap<>();
+     params1.put("key", "value");
+     sd1.setCols(cols1);
+     sd1.setParameters(params1);
+     sd1.setLocation("loc1");
+     tbl1.setSd(sd1);
+     tbl1.setPartitionKeys(new ArrayList<>());
+ 
+     Table tbl2 = new Table();
+     StorageDescriptor sd2 = new StorageDescriptor();
+     List<FieldSchema> cols2 = new ArrayList<>();
+     cols2.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params2 = new HashMap<>();
+     params2.put("key", "value");
+     sd2.setCols(cols2);
+     sd2.setParameters(params2);
+     sd2.setLocation("loc2");
+     tbl2.setSd(sd2);
+     tbl2.setPartitionKeys(new ArrayList<>());
+ 
+     Table tbl3 = new Table();
+     StorageDescriptor sd3 = new StorageDescriptor();
+     List<FieldSchema> cols3 = new ArrayList<>();
+     cols3.add(new FieldSchema("col3", "int", ""));
+     Map<String, String> params3 = new HashMap<>();
+     params3.put("key2", "value2");
+     sd3.setCols(cols3);
+     sd3.setParameters(params3);
+     sd3.setLocation("loc3");
+     tbl3.setSd(sd3);
+     tbl3.setPartitionKeys(new ArrayList<>());
+ 
+     Table newTbl1 = new Table();
+     newTbl1.setDbName("db2");
+     newTbl1.setTableName("tbl1");
+     StorageDescriptor newSd1 = new StorageDescriptor();
+     List<FieldSchema> newCols1 = new ArrayList<>();
+     newCols1.add(new FieldSchema("newcol1", "int", ""));
+     Map<String, String> newParams1 = new HashMap<>();
+     newParams1.put("key", "value");
+     newSd1.setCols(newCols1);
+     newSd1.setParameters(newParams1);
+     newSd1.setLocation("loc1");
+     newTbl1.setSd(newSd1);
+     newTbl1.setPartitionKeys(new ArrayList<>());
+ 
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl1", tbl1);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl2", tbl2);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db1", "tbl3", tbl3);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", tbl1);
+ 
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 4);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 2);
+ 
+     Table t = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1");
+     Assert.assertEquals(t.getSd().getLocation(), "loc1");
+ 
+     sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl1");
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 3);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 2);
+ 
+     sharedCache.alterTableInCache(DEFAULT_CATALOG_NAME, "db2", "tbl1", newTbl1);
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 3);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 3);
+ 
+     sharedCache.removeTableFromCache(DEFAULT_CATALOG_NAME, "db1", "tbl2");
+     Assert.assertEquals(sharedCache.getCachedTableCount(), 2);
+     Assert.assertEquals(sharedCache.getSdCache().size(), 2);
+   }
+ 
+ 
+   @Test
+   public void testSharedStorePartition() {
+     String dbName = "db1";
+     String tbl1Name = "tbl1";
+     String tbl2Name = "tbl2";
+     String owner = "user1";
+     Database db = createTestDb(dbName, owner);
+     sharedCache.addDatabaseToCache(db);
+     FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+     FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+     List<FieldSchema> cols = new ArrayList<FieldSchema>();
+     cols.add(col1);
+     cols.add(col2);
+     List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+     Table tbl1 = createTestTbl(dbName, tbl1Name, owner, cols, ptnCols);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, tbl1);
+     Table tbl2 = createTestTbl(dbName, tbl2Name, owner, cols, ptnCols);
+     sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, tbl2);
+ 
+     Partition part1 = new Partition();
+     StorageDescriptor sd1 = new StorageDescriptor();
+     List<FieldSchema> cols1 = new ArrayList<>();
+     cols1.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params1 = new HashMap<>();
+     params1.put("key", "value");
+     sd1.setCols(cols1);
+     sd1.setParameters(params1);
+     sd1.setLocation("loc1");
+     part1.setSd(sd1);
+     part1.setValues(Arrays.asList("201701"));
+ 
+     Partition part2 = new Partition();
+     StorageDescriptor sd2 = new StorageDescriptor();
+     List<FieldSchema> cols2 = new ArrayList<>();
+     cols2.add(new FieldSchema("col1", "int", ""));
+     Map<String, String> params2 = new HashMap<>();
+     params2.put("key", "value");
+     sd2.setCols(cols2);
+     sd2.setParameters(params2);
+     sd2.setLocation("loc2");
+     part2.setSd(sd2);
+     part2.setValues(Arrays.asList("201702"));
+ 
+     Partition part3 = new Partition();
+     StorageDescriptor sd3 = new StorageDescriptor();
+     List<FieldSchema> cols3 = new ArrayList<>();
+     cols3.add(new FieldSchema("col3", "int", ""));
+     Map<String, String> params3 = new HashMap<>();
+     params3.put("key2", "value2");
+     sd3.setCols(cols3);
+     sd3.setParameters(params3);
+     sd3.setLocation("loc3");
+     part3.setSd(sd3);
+     part3.setValues(Arrays.asList("201703"));
+ 
+     Partition newPart1 = new Partition();
+     newPart1.setDbName(dbName);
+     newPart1.setTableName(tbl1Name);
+     StorageDescriptor newSd1 = new StorageDescriptor();
+     List<FieldSchema> newCols1 = new ArrayList<>();
+     newCols1.add(new FieldSchema("newcol1", "int", ""));
+     Map<String, String> newParams1 = new HashMap<>();
+     newParams1.put("key", "value");
+     newSd1.setCols(newCols1);
+     newSd1.setParameters(newParams1);
+     newSd1.setLocation("loc1new");
+     newPart1.setSd(newSd1);
+     newPart1.setValues(Arrays.asList("201701"));
+ 
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part1);
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part2);
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, part3);
+     sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, part1);
+ 
+     Partition t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"));
+     Assert.assertEquals(t.getSd().getLocation(), "loc1");
+ 
+     sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701"));
+     t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl2Name, Arrays.asList("201701"));
+     Assert.assertNull(t);
+ 
+     sharedCache.alterPartitionInCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"), newPart1);
+     t = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbName, tbl1Name, Arrays.asList("201701"));
+     Assert.assertEquals(t.getSd().getLocation(), "loc1new");
+   }
+ 
+   @Test
+   public void testAggrStatsRepeatedRead() throws Exception {
+     String dbName = "testTableColStatsOps";
+     String tblName = "tbl";
+     String colName = "f1";
+ 
+     Database db = new DatabaseBuilder()
+         .setName(dbName)
+         .setLocation("some_location")
+         .build(conf);
+     cachedStore.createDatabase(db);
+ 
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema(colName, "int", null));
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("col", "int", null));
+     StorageDescriptor sd =
+         new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()),
+             null, null, null);
+ 
+     Table tbl =
+         new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(),
+             null, null, TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.createTable(tbl);
+ 
+     List<String> partVals1 = new ArrayList<>();
+     partVals1.add("1");
+     List<String> partVals2 = new ArrayList<>();
+     partVals2.add("2");
+ 
+     Partition ptn1 =
+         new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn1);
+     Partition ptn2 =
+         new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn2);
+ 
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     statsDesc.setPartName("col");
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     ColumnStatisticsData data = new ColumnStatisticsData();
+     ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(0);
+     longStats.setHighValue(100);
+     longStats.setNumNulls(50);
+     longStats.setNumDVs(30);
+     data.setLongStats(longStats);
+     colStatObjs.add(colStats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
+     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
+     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add(colName);
+     List<String> aggrPartVals = new ArrayList<>();
+     aggrPartVals.add("1");
+     aggrPartVals.add("2");
+     AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+   }
+ 
+   @Test
+   public void testPartitionAggrStats() throws Exception {
+     String dbName = "testTableColStatsOps1";
+     String tblName = "tbl1";
+     String colName = "f1";
+ 
+     Database db = new Database(dbName, null, "some_location", null);
+     db.setCatalogName(DEFAULT_CATALOG_NAME);
+     cachedStore.createDatabase(db);
+ 
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema(colName, "int", null));
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("col", "int", null));
+     StorageDescriptor sd =
+         new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()),
+             null, null, null);
+ 
+     Table tbl =
+         new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(),
+             null, null, TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.createTable(tbl);
+ 
+     List<String> partVals1 = new ArrayList<>();
+     partVals1.add("1");
+     List<String> partVals2 = new ArrayList<>();
+     partVals2.add("2");
+ 
+     Partition ptn1 =
+         new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn1);
+     Partition ptn2 =
+         new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn2);
+ 
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     statsDesc.setPartName("col");
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     ColumnStatisticsData data = new ColumnStatisticsData();
+     ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(0);
+     longStats.setHighValue(100);
+     longStats.setNumNulls(50);
+     longStats.setNumDVs(30);
+     data.setLongStats(longStats);
+     colStatObjs.add(colStats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
+     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
+ 
+     longStats.setNumDVs(40);
+     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add(colName);
+     List<String> aggrPartVals = new ArrayList<>();
+     aggrPartVals.add("1");
+     aggrPartVals.add("2");
+     AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
+     aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 40);
+   }
+ 
+   @Test
+   public void testPartitionAggrStatsBitVector() throws Exception {
+     String dbName = "testTableColStatsOps2";
+     String tblName = "tbl2";
+     String colName = "f1";
+ 
+     Database db = new Database(dbName, null, "some_location", null);
+     db.setCatalogName(DEFAULT_CATALOG_NAME);
+     cachedStore.createDatabase(db);
+ 
+     List<FieldSchema> cols = new ArrayList<>();
+     cols.add(new FieldSchema(colName, "int", null));
+     List<FieldSchema> partCols = new ArrayList<>();
+     partCols.add(new FieldSchema("col", "int", null));
+     StorageDescriptor sd =
+         new StorageDescriptor(cols, null, "input", "output", false, 0, new SerDeInfo("serde", "seriallib", new HashMap<>()),
+             null, null, null);
+ 
+     Table tbl =
+         new Table(tblName, dbName, null, 0, 0, 0, sd, partCols, new HashMap<>(),
+             null, null, TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.createTable(tbl);
+ 
+     List<String> partVals1 = new ArrayList<>();
+     partVals1.add("1");
+     List<String> partVals2 = new ArrayList<>();
+     partVals2.add("2");
+ 
+     Partition ptn1 =
+         new Partition(partVals1, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn1.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn1);
+     Partition ptn2 =
+         new Partition(partVals2, dbName, tblName, 0, 0, sd, new HashMap<>());
+     ptn2.setCatName(DEFAULT_CATALOG_NAME);
+     cachedStore.addPartition(ptn2);
+ 
+     ColumnStatistics stats = new ColumnStatistics();
+     ColumnStatisticsDesc statsDesc = new ColumnStatisticsDesc(true, dbName, tblName);
+     statsDesc.setPartName("col");
+     List<ColumnStatisticsObj> colStatObjs = new ArrayList<>();
+ 
+     ColumnStatisticsData data = new ColumnStatisticsData();
+     ColumnStatisticsObj colStats = new ColumnStatisticsObj(colName, "int", data);
+     LongColumnStatsDataInspector longStats = new LongColumnStatsDataInspector();
+     longStats.setLowValue(0);
+     longStats.setHighValue(100);
+     longStats.setNumNulls(50);
+     longStats.setNumDVs(30);
+ 
+     HyperLogLog hll = HyperLogLog.builder().build();
+     hll.addLong(1);
+     hll.addLong(2);
+     hll.addLong(3);
+     longStats.setBitVectors(hll.serialize());
+ 
+     data.setLongStats(longStats);
+     colStatObjs.add(colStats);
+ 
+     stats.setStatsDesc(statsDesc);
+     stats.setStatsObj(colStatObjs);
+ 
+     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals1);
+ 
+     longStats.setNumDVs(40);
+     hll = HyperLogLog.builder().build();
+     hll.addLong(2);
+     hll.addLong(3);
+     hll.addLong(4);
+     hll.addLong(5);
+     longStats.setBitVectors(hll.serialize());
+ 
+     cachedStore.updatePartitionColumnStatistics(stats.deepCopy(), partVals2);
+ 
+     List<String> colNames = new ArrayList<>();
+     colNames.add(colName);
+     List<String> aggrPartVals = new ArrayList<>();
+     aggrPartVals.add("1");
+     aggrPartVals.add("2");
+     AggrStats aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
+     aggrStats = cachedStore.get_aggr_stats_for(DEFAULT_CATALOG_NAME, dbName, tblName, aggrPartVals, colNames);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls(), 100);
+     Assert.assertEquals(aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumDVs(), 5);
+   }
+ 
+   @Test
+   public void testMultiThreadedSharedCacheOps() throws Exception {
+     List<String> dbNames = new ArrayList<String>(Arrays.asList("db1", "db2", "db3", "db4", "db5"));
+     List<Callable<Object>> tasks = new ArrayList<Callable<Object>>();
+     ExecutorService executor = Executors.newFixedThreadPool(50, new ThreadFactory() {
+       @Override
+       public Thread newThread(Runnable r) {
+         Thread t = Executors.defaultThreadFactory().newThread(r);
+         t.setDaemon(true);
+         return t;
+       }
+     });
+ 
+     // Create 5 dbs
+     for (String dbName : dbNames) {
+       Callable<Object> c = new Callable<Object>() {
+         public Object call() {
+           Database db = createTestDb(dbName, "user1");
+           sharedCache.addDatabaseToCache(db);
+           return null;
+         }
+       };
+       tasks.add(c);
+     }
+     executor.invokeAll(tasks);
+     for (String dbName : dbNames) {
+       Database db = sharedCache.getDatabaseFromCache(DEFAULT_CATALOG_NAME, dbName);
+       Assert.assertNotNull(db);
+       Assert.assertEquals(dbName, db.getName());
+     }
+ 
+     // Create 5 tables under "db1"
+     List<String> tblNames =
+         new ArrayList<String>(Arrays.asList("tbl1", "tbl2", "tbl3", "tbl4", "tbl5"));
+     tasks.clear();
+     for (String tblName : tblNames) {
+       FieldSchema col1 = new FieldSchema("col1", "int", "integer column");
+       FieldSchema col2 = new FieldSchema("col2", "string", "string column");
+       List<FieldSchema> cols = new ArrayList<FieldSchema>();
+       cols.add(col1);
+       cols.add(col2);
+       FieldSchema ptnCol1 = new FieldSchema("part1", "string", "string partition column");
+       List<FieldSchema> ptnCols = new ArrayList<FieldSchema>();
+       ptnCols.add(ptnCol1);
+       Callable<Object> c = new Callable<Object>() {
+         public Object call() {
+           Table tbl = createTestTbl(dbNames.get(0), tblName, "user1", cols, ptnCols);
+           sharedCache.addTableToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, tbl);
+           return null;
+         }
+       };
+       tasks.add(c);
+     }
+     executor.invokeAll(tasks);
+     for (String tblName : tblNames) {
+       Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName);
+       Assert.assertNotNull(tbl);
+       Assert.assertEquals(tblName, tbl.getTableName());
+     }
+ 
+     // Add 5 partitions to all tables
+     List<String> ptnVals = new ArrayList<String>(Arrays.asList("aaa", "bbb", "ccc", "ddd", "eee"));
+     tasks.clear();
+     for (String tblName : tblNames) {
+       Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName);
+       for (String ptnVal : ptnVals) {
+         Map<String, String> partParams = new HashMap<String, String>();
+         Callable<Object> c = new Callable<Object>() {
+           public Object call() {
+             Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0,
+                 tbl.getSd(), partParams);
+             sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn);
+             return null;
+           }
+         };
+         tasks.add(c);
+       }
+     }
+     executor.invokeAll(tasks);
+     for (String tblName : tblNames) {
+       for (String ptnVal : ptnVals) {
+         Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal));
+         Assert.assertNotNull(ptn);
+         Assert.assertEquals(tblName, ptn.getTableName());
+         Assert.assertEquals(Arrays.asList(ptnVal), ptn.getValues());
+       }
+     }
+ 
+     // Drop all partitions from "tbl1", "tbl2", "tbl3" and add 2 new partitions to "tbl4" and "tbl5"
+     List<String> newPtnVals = new ArrayList<String>(Arrays.asList("fff", "ggg"));
+     List<String> dropPtnTblNames = new ArrayList<String>(Arrays.asList("tbl1", "tbl2", "tbl3"));
+     List<String> addPtnTblNames = new ArrayList<String>(Arrays.asList("tbl4", "tbl5"));
+     tasks.clear();
+     for (String tblName : dropPtnTblNames) {
+       for (String ptnVal : ptnVals) {
+         Callable<Object> c = new Callable<Object>() {
+           public Object call() {
+             sharedCache.removePartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal));
+             return null;
+           }
+         };
+         tasks.add(c);
+       }
+     }
+     for (String tblName : addPtnTblNames) {
+       Table tbl = sharedCache.getTableFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName);
+       for (String ptnVal : newPtnVals) {
+         Map<String, String> partParams = new HashMap<String, String>();
+         Callable<Object> c = new Callable<Object>() {
+           public Object call() {
+             Partition ptn = new Partition(Arrays.asList(ptnVal), dbNames.get(0), tblName, 0, 0,
+                 tbl.getSd(), partParams);
+             sharedCache.addPartitionToCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, ptn);
+             return null;
+           }
+         };
+         tasks.add(c);
+       }
+     }
+     executor.invokeAll(tasks);
+     for (String tblName : addPtnTblNames) {
+       for (String ptnVal : newPtnVals) {
+         Partition ptn = sharedCache.getPartitionFromCache(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, Arrays.asList(ptnVal));
+         Assert.assertNotNull(ptn);
+         Assert.assertEquals(tblName, ptn.getTableName());
+         Assert.assertEquals(Arrays.asList(ptnVal), ptn.getValues());
+       }
+     }
+     for (String tblName : dropPtnTblNames) {
+       List<Partition> ptns = sharedCache.listCachedPartitions(DEFAULT_CATALOG_NAME, dbNames.get(0), tblName, 100);
+       Assert.assertEquals(0, ptns.size());
+     }
+     sharedCache.getDatabaseCache().clear();
+     sharedCache.getTableCache().clear();
+     sharedCache.getSdCache().clear();
+   }
+ 
+   private Database createTestDb(String dbName, String dbOwner) {
+     String dbDescription = dbName;
+     String dbLocation = "file:/tmp";
+     Map<String, String> dbParams = new HashMap<>();
+     Database db = new Database(dbName, dbDescription, dbLocation, dbParams);
+     db.setOwnerName(dbOwner);
+     db.setOwnerType(PrincipalType.USER);
+     db.setCatalogName(DEFAULT_CATALOG_NAME);
+     return db;
+   }
+ 
+   private Table createTestTbl(String dbName, String tblName, String tblOwner,
+       List<FieldSchema> cols, List<FieldSchema> ptnCols) {
+     String serdeLocation = "file:/tmp";
+     Map<String, String> serdeParams = new HashMap<>();
+     Map<String, String> tblParams = new HashMap<>();
+     SerDeInfo serdeInfo = new SerDeInfo("serde", "seriallib", new HashMap<>());
+     StorageDescriptor sd = new StorageDescriptor(cols, serdeLocation, "input", "output", false, 0,
+         serdeInfo, null, null, serdeParams);
+     sd.setStoredAsSubDirectories(false);
+     Table tbl = new Table(tblName, dbName, tblOwner, 0, 0, 0, sd, ptnCols, tblParams, null, null,
+         TableType.MANAGED_TABLE.toString());
+     tbl.setCatName(DEFAULT_CATALOG_NAME);
+     return tbl;
+   }
+ 
+   // This method will return only after the cache has updated once
+   private void updateCache(CachedStore cachedStore) throws InterruptedException {
+     int maxTries = 100000;
+     long updateCountBefore = cachedStore.getCacheUpdateCount();
+     // Start the CachedStore update service
+     CachedStore.startCacheUpdateService(cachedStore.getConf(), true, false);
+     while ((cachedStore.getCacheUpdateCount() != (updateCountBefore + 1)) && (maxTries-- > 0)) {
+       Thread.sleep(1000);
+     }
+     CachedStore.stopCacheUpdateService(100);
+   }
+ }
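
The bit-vector test above expects an aggregate NDV of 5, not 3 + 4 = 7: partition 1's sketch saw {1, 2, 3} and partition 2's saw {2, 3, 4, 5}, and aggregation unions the serialized HyperLogLog sketches, so the estimate tracks distinct values across partitions. A minimal sketch of that union, assuming Hive's HyperLogLog exposes merge() and count() alongside the addLong()/serialize() calls the test itself uses:

    import org.apache.hadoop.hive.common.ndv.hll.HyperLogLog;

    public class HllNdvUnion {
      public static void main(String[] args) {
        // Same values the test feeds the two partitions' sketches.
        HyperLogLog p1 = HyperLogLog.builder().build();
        p1.addLong(1); p1.addLong(2); p1.addLong(3);

        HyperLogLog p2 = HyperLogLog.builder().build();
        p2.addLong(2); p2.addLong(3); p2.addLong(4); p2.addLong(5);

        // merge() is assumed to union the registers; for a handful of
        // values the estimate is exact, matching the asserted numDVs of 5.
        p1.merge(p2);
        System.out.println(p1.count());  // 5
      }
    }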


[17/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
new file mode 100644
index 0000000..d4eed32
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
@@ -0,0 +1,861 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class LockRequest implements org.apache.thrift.TBase<LockRequest, LockRequest._Fields>, java.io.Serializable, Cloneable, Comparable<LockRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LockRequest");
+
+  private static final org.apache.thrift.protocol.TField COMPONENT_FIELD_DESC = new org.apache.thrift.protocol.TField("component", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)2);
+  private static final org.apache.thrift.protocol.TField USER_FIELD_DESC = new org.apache.thrift.protocol.TField("user", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField HOSTNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("hostname", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField AGENT_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("agentInfo", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new LockRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new LockRequestTupleSchemeFactory());
+  }
+
+  private List<LockComponent> component; // required
+  private long txnid; // optional
+  private String user; // required
+  private String hostname; // required
+  private String agentInfo; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    COMPONENT((short)1, "component"),
+    TXNID((short)2, "txnid"),
+    USER((short)3, "user"),
+    HOSTNAME((short)4, "hostname"),
+    AGENT_INFO((short)5, "agentInfo");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // COMPONENT
+          return COMPONENT;
+        case 2: // TXNID
+          return TXNID;
+        case 3: // USER
+          return USER;
+        case 4: // HOSTNAME
+          return HOSTNAME;
+        case 5: // AGENT_INFO
+          return AGENT_INFO;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TXNID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.TXNID,_Fields.AGENT_INFO};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.COMPONENT, new org.apache.thrift.meta_data.FieldMetaData("component", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, LockComponent.class))));
+    tmpMap.put(_Fields.TXNID, new org.apache.thrift.meta_data.FieldMetaData("txnid", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.USER, new org.apache.thrift.meta_data.FieldMetaData("user", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.HOSTNAME, new org.apache.thrift.meta_data.FieldMetaData("hostname", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.AGENT_INFO, new org.apache.thrift.meta_data.FieldMetaData("agentInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LockRequest.class, metaDataMap);
+  }
+
+  public LockRequest() {
+    this.agentInfo = "Unknown";
+
+  }
+
+  public LockRequest(
+    List<LockComponent> component,
+    String user,
+    String hostname)
+  {
+    this();
+    this.component = component;
+    this.user = user;
+    this.hostname = hostname;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public LockRequest(LockRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetComponent()) {
+      List<LockComponent> __this__component = new ArrayList<LockComponent>(other.component.size());
+      for (LockComponent other_element : other.component) {
+        __this__component.add(new LockComponent(other_element));
+      }
+      this.component = __this__component;
+    }
+    this.txnid = other.txnid;
+    if (other.isSetUser()) {
+      this.user = other.user;
+    }
+    if (other.isSetHostname()) {
+      this.hostname = other.hostname;
+    }
+    if (other.isSetAgentInfo()) {
+      this.agentInfo = other.agentInfo;
+    }
+  }
+
+  public LockRequest deepCopy() {
+    return new LockRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.component = null;
+    setTxnidIsSet(false);
+    this.txnid = 0;
+    this.user = null;
+    this.hostname = null;
+    this.agentInfo = "Unknown";
+
+  }
+
+  public int getComponentSize() {
+    return (this.component == null) ? 0 : this.component.size();
+  }
+
+  public java.util.Iterator<LockComponent> getComponentIterator() {
+    return (this.component == null) ? null : this.component.iterator();
+  }
+
+  public void addToComponent(LockComponent elem) {
+    if (this.component == null) {
+      this.component = new ArrayList<LockComponent>();
+    }
+    this.component.add(elem);
+  }
+
+  public List<LockComponent> getComponent() {
+    return this.component;
+  }
+
+  public void setComponent(List<LockComponent> component) {
+    this.component = component;
+  }
+
+  public void unsetComponent() {
+    this.component = null;
+  }
+
+  /** Returns true if field component is set (has been assigned a value) and false otherwise */
+  public boolean isSetComponent() {
+    return this.component != null;
+  }
+
+  public void setComponentIsSet(boolean value) {
+    if (!value) {
+      this.component = null;
+    }
+  }
+
+  public long getTxnid() {
+    return this.txnid;
+  }
+
+  public void setTxnid(long txnid) {
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+  }
+
+  public void unsetTxnid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  /** Returns true if field txnid is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnid() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  public void setTxnidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+  }
+
+  public String getUser() {
+    return this.user;
+  }
+
+  public void setUser(String user) {
+    this.user = user;
+  }
+
+  public void unsetUser() {
+    this.user = null;
+  }
+
+  /** Returns true if field user is set (has been assigned a value) and false otherwise */
+  public boolean isSetUser() {
+    return this.user != null;
+  }
+
+  public void setUserIsSet(boolean value) {
+    if (!value) {
+      this.user = null;
+    }
+  }
+
+  public String getHostname() {
+    return this.hostname;
+  }
+
+  public void setHostname(String hostname) {
+    this.hostname = hostname;
+  }
+
+  public void unsetHostname() {
+    this.hostname = null;
+  }
+
+  /** Returns true if field hostname is set (has been assigned a value) and false otherwise */
+  public boolean isSetHostname() {
+    return this.hostname != null;
+  }
+
+  public void setHostnameIsSet(boolean value) {
+    if (!value) {
+      this.hostname = null;
+    }
+  }
+
+  public String getAgentInfo() {
+    return this.agentInfo;
+  }
+
+  public void setAgentInfo(String agentInfo) {
+    this.agentInfo = agentInfo;
+  }
+
+  public void unsetAgentInfo() {
+    this.agentInfo = null;
+  }
+
+  /** Returns true if field agentInfo is set (has been assigned a value) and false otherwise */
+  public boolean isSetAgentInfo() {
+    return this.agentInfo != null;
+  }
+
+  public void setAgentInfoIsSet(boolean value) {
+    if (!value) {
+      this.agentInfo = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case COMPONENT:
+      if (value == null) {
+        unsetComponent();
+      } else {
+        setComponent((List<LockComponent>)value);
+      }
+      break;
+
+    case TXNID:
+      if (value == null) {
+        unsetTxnid();
+      } else {
+        setTxnid((Long)value);
+      }
+      break;
+
+    case USER:
+      if (value == null) {
+        unsetUser();
+      } else {
+        setUser((String)value);
+      }
+      break;
+
+    case HOSTNAME:
+      if (value == null) {
+        unsetHostname();
+      } else {
+        setHostname((String)value);
+      }
+      break;
+
+    case AGENT_INFO:
+      if (value == null) {
+        unsetAgentInfo();
+      } else {
+        setAgentInfo((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case COMPONENT:
+      return getComponent();
+
+    case TXNID:
+      return getTxnid();
+
+    case USER:
+      return getUser();
+
+    case HOSTNAME:
+      return getHostname();
+
+    case AGENT_INFO:
+      return getAgentInfo();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case COMPONENT:
+      return isSetComponent();
+    case TXNID:
+      return isSetTxnid();
+    case USER:
+      return isSetUser();
+    case HOSTNAME:
+      return isSetHostname();
+    case AGENT_INFO:
+      return isSetAgentInfo();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof LockRequest)
+      return this.equals((LockRequest)that);
+    return false;
+  }
+
+  public boolean equals(LockRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_component = true && this.isSetComponent();
+    boolean that_present_component = true && that.isSetComponent();
+    if (this_present_component || that_present_component) {
+      if (!(this_present_component && that_present_component))
+        return false;
+      if (!this.component.equals(that.component))
+        return false;
+    }
+
+    boolean this_present_txnid = true && this.isSetTxnid();
+    boolean that_present_txnid = true && that.isSetTxnid();
+    if (this_present_txnid || that_present_txnid) {
+      if (!(this_present_txnid && that_present_txnid))
+        return false;
+      if (this.txnid != that.txnid)
+        return false;
+    }
+
+    boolean this_present_user = true && this.isSetUser();
+    boolean that_present_user = true && that.isSetUser();
+    if (this_present_user || that_present_user) {
+      if (!(this_present_user && that_present_user))
+        return false;
+      if (!this.user.equals(that.user))
+        return false;
+    }
+
+    boolean this_present_hostname = true && this.isSetHostname();
+    boolean that_present_hostname = true && that.isSetHostname();
+    if (this_present_hostname || that_present_hostname) {
+      if (!(this_present_hostname && that_present_hostname))
+        return false;
+      if (!this.hostname.equals(that.hostname))
+        return false;
+    }
+
+    boolean this_present_agentInfo = true && this.isSetAgentInfo();
+    boolean that_present_agentInfo = true && that.isSetAgentInfo();
+    if (this_present_agentInfo || that_present_agentInfo) {
+      if (!(this_present_agentInfo && that_present_agentInfo))
+        return false;
+      if (!this.agentInfo.equals(that.agentInfo))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_component = true && (isSetComponent());
+    list.add(present_component);
+    if (present_component)
+      list.add(component);
+
+    boolean present_txnid = true && (isSetTxnid());
+    list.add(present_txnid);
+    if (present_txnid)
+      list.add(txnid);
+
+    boolean present_user = true && (isSetUser());
+    list.add(present_user);
+    if (present_user)
+      list.add(user);
+
+    boolean present_hostname = true && (isSetHostname());
+    list.add(present_hostname);
+    if (present_hostname)
+      list.add(hostname);
+
+    boolean present_agentInfo = true && (isSetAgentInfo());
+    list.add(present_agentInfo);
+    if (present_agentInfo)
+      list.add(agentInfo);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(LockRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetComponent()).compareTo(other.isSetComponent());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetComponent()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.component, other.component);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTxnid()).compareTo(other.isSetTxnid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnid, other.txnid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetUser()).compareTo(other.isSetUser());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetUser()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.user, other.user);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetHostname()).compareTo(other.isSetHostname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHostname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hostname, other.hostname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAgentInfo()).compareTo(other.isSetAgentInfo());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAgentInfo()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.agentInfo, other.agentInfo);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("LockRequest(");
+    boolean first = true;
+
+    sb.append("component:");
+    if (this.component == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.component);
+    }
+    first = false;
+    if (isSetTxnid()) {
+      if (!first) sb.append(", ");
+      sb.append("txnid:");
+      sb.append(this.txnid);
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("user:");
+    if (this.user == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.user);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("hostname:");
+    if (this.hostname == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.hostname);
+    }
+    first = false;
+    if (isSetAgentInfo()) {
+      if (!first) sb.append(", ");
+      sb.append("agentInfo:");
+      if (this.agentInfo == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.agentInfo);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetComponent()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'component' is unset! Struct:" + toString());
+    }
+
+    if (!isSetUser()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'user' is unset! Struct:" + toString());
+    }
+
+    if (!isSetHostname()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'hostname' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization does not call the default constructor, so the isset bitfield must be reset here explicitly.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class LockRequestStandardSchemeFactory implements SchemeFactory {
+    public LockRequestStandardScheme getScheme() {
+      return new LockRequestStandardScheme();
+    }
+  }
+
+  private static class LockRequestStandardScheme extends StandardScheme<LockRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // COMPONENT
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list658 = iprot.readListBegin();
+                struct.component = new ArrayList<LockComponent>(_list658.size);
+                LockComponent _elem659;
+                for (int _i660 = 0; _i660 < _list658.size; ++_i660)
+                {
+                  _elem659 = new LockComponent();
+                  _elem659.read(iprot);
+                  struct.component.add(_elem659);
+                }
+                iprot.readListEnd();
+              }
+              struct.setComponentIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TXNID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txnid = iprot.readI64();
+              struct.setTxnidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // USER
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.user = iprot.readString();
+              struct.setUserIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // HOSTNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.hostname = iprot.readString();
+              struct.setHostnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // AGENT_INFO
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.agentInfo = iprot.readString();
+              struct.setAgentInfoIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.component != null) {
+        oprot.writeFieldBegin(COMPONENT_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size()));
+          for (LockComponent _iter661 : struct.component)
+          {
+            _iter661.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetTxnid()) {
+        oprot.writeFieldBegin(TXNID_FIELD_DESC);
+        oprot.writeI64(struct.txnid);
+        oprot.writeFieldEnd();
+      }
+      if (struct.user != null) {
+        oprot.writeFieldBegin(USER_FIELD_DESC);
+        oprot.writeString(struct.user);
+        oprot.writeFieldEnd();
+      }
+      if (struct.hostname != null) {
+        oprot.writeFieldBegin(HOSTNAME_FIELD_DESC);
+        oprot.writeString(struct.hostname);
+        oprot.writeFieldEnd();
+      }
+      if (struct.agentInfo != null) {
+        if (struct.isSetAgentInfo()) {
+          oprot.writeFieldBegin(AGENT_INFO_FIELD_DESC);
+          oprot.writeString(struct.agentInfo);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class LockRequestTupleSchemeFactory implements SchemeFactory {
+    public LockRequestTupleScheme getScheme() {
+      return new LockRequestTupleScheme();
+    }
+  }
+
+  private static class LockRequestTupleScheme extends TupleScheme<LockRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.component.size());
+        for (LockComponent _iter662 : struct.component)
+        {
+          _iter662.write(oprot);
+        }
+      }
+      oprot.writeString(struct.user);
+      oprot.writeString(struct.hostname);
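+      // Optional fields (txnid, agentInfo) are flagged in a bitset written
+      // after the required fields; unset values are omitted from the tuple
+      // encoding entirely, and read() consumes the matching bitset below.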
+      BitSet optionals = new BitSet();
+      if (struct.isSetTxnid()) {
+        optionals.set(0);
+      }
+      if (struct.isSetAgentInfo()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetTxnid()) {
+        oprot.writeI64(struct.txnid);
+      }
+      if (struct.isSetAgentInfo()) {
+        oprot.writeString(struct.agentInfo);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.component = new ArrayList<LockComponent>(_list663.size);
+        LockComponent _elem664;
+        for (int _i665 = 0; _i665 < _list663.size; ++_i665)
+        {
+          _elem664 = new LockComponent();
+          _elem664.read(iprot);
+          struct.component.add(_elem664);
+        }
+      }
+      struct.setComponentIsSet(true);
+      struct.user = iprot.readString();
+      struct.setUserIsSet(true);
+      struct.hostname = iprot.readString();
+      struct.setHostnameIsSet(true);
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.txnid = iprot.readI64();
+        struct.setTxnidIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.agentInfo = iprot.readString();
+        struct.setAgentInfoIsSet(true);
+      }
+    }
+  }
+
+}
+

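For context, a minimal usage sketch of the generated class above (illustrative
only, not part of the patch). It populates the three REQUIRED fields enforced
by validate() -- component, user, hostname -- plus the two optional ones; the
user/hostname/agentInfo strings and the txnid are made-up values, and the
LockComponent is left empty since LockRequest.validate() does not descend into
sub-structs.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockRequest;

public class LockRequestExample {
  public static void main(String[] args) throws Exception {
    List<LockComponent> components = new ArrayList<LockComponent>();
    components.add(new LockComponent());   // component fields elided here
    LockRequest req = new LockRequest(components, "hive", "worker-01.example.com");
    req.setTxnid(42L);                     // optional: ties the locks to a transaction
    req.setAgentInfo("example-client");    // optional: replaces the "Unknown" default
    req.validate();                        // throws TProtocolException if a required field is unset
    System.out.println(req);
  }
}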
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java
new file mode 100644
index 0000000..fdaab4b
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockResponse.java
@@ -0,0 +1,500 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class LockResponse implements org.apache.thrift.TBase<LockResponse, LockResponse._Fields>, java.io.Serializable, Cloneable, Comparable<LockResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LockResponse");
+
+  private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("state", org.apache.thrift.protocol.TType.I32, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new LockResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new LockResponseTupleSchemeFactory());
+  }
+
+  private long lockid; // required
+  private LockState state; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    LOCKID((short)1, "lockid"),
+    /**
+     * 
+     * @see LockState
+     */
+    STATE((short)2, "state");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // LOCKID
+          return LOCKID;
+        case 2: // STATE
+          return STATE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __LOCKID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.LOCKID, new org.apache.thrift.meta_data.FieldMetaData("lockid", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.STATE, new org.apache.thrift.meta_data.FieldMetaData("state", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, LockState.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LockResponse.class, metaDataMap);
+  }
+
+  public LockResponse() {
+  }
+
+  public LockResponse(
+    long lockid,
+    LockState state)
+  {
+    this();
+    this.lockid = lockid;
+    setLockidIsSet(true);
+    this.state = state;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public LockResponse(LockResponse other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.lockid = other.lockid;
+    if (other.isSetState()) {
+      this.state = other.state;
+    }
+  }
+
+  public LockResponse deepCopy() {
+    return new LockResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    setLockidIsSet(false);
+    this.lockid = 0;
+    this.state = null;
+  }
+
+  public long getLockid() {
+    return this.lockid;
+  }
+
+  public void setLockid(long lockid) {
+    this.lockid = lockid;
+    setLockidIsSet(true);
+  }
+
+  public void unsetLockid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOCKID_ISSET_ID);
+  }
+
+  /** Returns true if field lockid is set (has been assigned a value) and false otherwise */
+  public boolean isSetLockid() {
+    return EncodingUtils.testBit(__isset_bitfield, __LOCKID_ISSET_ID);
+  }
+
+  public void setLockidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOCKID_ISSET_ID, value);
+  }
+
+  /**
+   * 
+   * @see LockState
+   */
+  public LockState getState() {
+    return this.state;
+  }
+
+  /**
+   * 
+   * @see LockState
+   */
+  public void setState(LockState state) {
+    this.state = state;
+  }
+
+  public void unsetState() {
+    this.state = null;
+  }
+
+  /** Returns true if field state is set (has been assigned a value) and false otherwise */
+  public boolean isSetState() {
+    return this.state != null;
+  }
+
+  public void setStateIsSet(boolean value) {
+    if (!value) {
+      this.state = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case LOCKID:
+      if (value == null) {
+        unsetLockid();
+      } else {
+        setLockid((Long)value);
+      }
+      break;
+
+    case STATE:
+      if (value == null) {
+        unsetState();
+      } else {
+        setState((LockState)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case LOCKID:
+      return getLockid();
+
+    case STATE:
+      return getState();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case LOCKID:
+      return isSetLockid();
+    case STATE:
+      return isSetState();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof LockResponse)
+      return this.equals((LockResponse)that);
+    return false;
+  }
+
+  public boolean equals(LockResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_lockid = true;
+    boolean that_present_lockid = true;
+    if (this_present_lockid || that_present_lockid) {
+      if (!(this_present_lockid && that_present_lockid))
+        return false;
+      if (this.lockid != that.lockid)
+        return false;
+    }
+
+    boolean this_present_state = true && this.isSetState();
+    boolean that_present_state = true && that.isSetState();
+    if (this_present_state || that_present_state) {
+      if (!(this_present_state && that_present_state))
+        return false;
+      if (!this.state.equals(that.state))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_lockid = true;
+    list.add(present_lockid);
+    if (present_lockid)
+      list.add(lockid);
+
+    boolean present_state = true && (isSetState());
+    list.add(present_state);
+    if (present_state)
+      list.add(state.getValue());
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(LockResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetLockid()).compareTo(other.isSetLockid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLockid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lockid, other.lockid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetState()).compareTo(other.isSetState());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetState()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.state, other.state);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("LockResponse(");
+    boolean first = true;
+
+    sb.append("lockid:");
+    sb.append(this.lockid);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("state:");
+    if (this.state == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.state);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetLockid()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'lockid' is unset! Struct:" + toString());
+    }
+
+    if (!isSetState()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'state' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization does not call the default constructor, so the isset bitfield must be reset here explicitly.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class LockResponseStandardSchemeFactory implements SchemeFactory {
+    public LockResponseStandardScheme getScheme() {
+      return new LockResponseStandardScheme();
+    }
+  }
+
+  private static class LockResponseStandardScheme extends StandardScheme<LockResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, LockResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // LOCKID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.lockid = iprot.readI64();
+              struct.setLockidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // STATE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.state = org.apache.hadoop.hive.metastore.api.LockState.findByValue(iprot.readI32());
+              struct.setStateIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, LockResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(LOCKID_FIELD_DESC);
+      oprot.writeI64(struct.lockid);
+      oprot.writeFieldEnd();
+      if (struct.state != null) {
+        oprot.writeFieldBegin(STATE_FIELD_DESC);
+        oprot.writeI32(struct.state.getValue());
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class LockResponseTupleSchemeFactory implements SchemeFactory {
+    public LockResponseTupleScheme getScheme() {
+      return new LockResponseTupleScheme();
+    }
+  }
+
+  private static class LockResponseTupleScheme extends TupleScheme<LockResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, LockResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.lockid);
+      oprot.writeI32(struct.state.getValue());
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, LockResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.lockid = iprot.readI64();
+      struct.setLockidIsSet(true);
+      struct.state = org.apache.hadoop.hive.metastore.api.LockState.findByValue(iprot.readI32());
+      struct.setStateIsSet(true);
+    }
+  }
+
+}
+

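A short round-trip sketch for the struct above (illustrative, not part of the
patch): TSerializer/TDeserializer drive the generated write()/read() methods,
which dispatch to the StandardScheme registered in the static schemes map; the
lockid value 7 is arbitrary.

import org.apache.thrift.TDeserializer;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

import org.apache.hadoop.hive.metastore.api.LockResponse;
import org.apache.hadoop.hive.metastore.api.LockState;

public class LockResponseRoundTrip {
  public static void main(String[] args) throws Exception {
    LockResponse original = new LockResponse(7L, LockState.ACQUIRED);
    original.validate();  // lockid and state are both REQUIRED

    // Serialize with the compact protocol, then decode into a fresh instance.
    TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
    byte[] bytes = serializer.serialize(original);

    LockResponse decoded = new LockResponse();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(decoded, bytes);

    System.out.println(original.equals(decoded));  // prints: true
  }
}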
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockState.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockState.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockState.java
new file mode 100644
index 0000000..48a0bbd
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockState.java
@@ -0,0 +1,51 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum LockState implements org.apache.thrift.TEnum {
+  ACQUIRED(1),
+  WAITING(2),
+  ABORT(3),
+  NOT_ACQUIRED(4);
+
+  private final int value;
+
+  private LockState(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static LockState findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return ACQUIRED;
+      case 2:
+        return WAITING;
+      case 3:
+        return ABORT;
+      case 4:
+        return NOT_ACQUIRED;
+      default:
+        return null;
+    }
+  }
+}

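A quick sketch of the enum's value mapping (illustrative, not part of the
patch): getValue() yields the wire integer declared in the Thrift IDL and
findByValue() inverts it, returning null -- not throwing -- for integers
outside 1..4.

import org.apache.hadoop.hive.metastore.api.LockState;

public class LockStateValues {
  public static void main(String[] args) {
    for (LockState state : LockState.values()) {
      boolean roundTrips = LockState.findByValue(state.getValue()) == state;
      System.out.println(state + " <-> " + state.getValue() + " ok=" + roundTrips);
    }
    System.out.println(LockState.findByValue(99));  // prints: null
  }
}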
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java
new file mode 100644
index 0000000..8ae4351
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockType.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum LockType implements org.apache.thrift.TEnum {
+  SHARED_READ(1),
+  SHARED_WRITE(2),
+  EXCLUSIVE(3);
+
+  private final int value;
+
+  private LockType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static LockType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return SHARED_READ;
+      case 2:
+        return SHARED_WRITE;
+      case 3:
+        return EXCLUSIVE;
+      default:
+        return null;
+    }
+  }
+}

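LockType follows the same pattern; a sketch (illustrative, not part of the
patch) showing that all generated Thrift enums implement TEnum, so generic
code can reach the wire value without knowing the concrete enum type.

import org.apache.thrift.TEnum;

import org.apache.hadoop.hive.metastore.api.LockType;

public class LockTypeValues {
  // Works for any generated Thrift enum, not just LockType.
  static int wireValue(TEnum e) {
    return e.getValue();
  }

  public static void main(String[] args) {
    for (LockType type : LockType.values()) {
      System.out.println(type.name() + " -> " + wireValue(type));
    }
    System.out.println(LockType.findByValue(3));  // prints: EXCLUSIVE
  }
}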
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java
new file mode 100644
index 0000000..e196d57
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LongColumnStatsData.java
@@ -0,0 +1,799 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class LongColumnStatsData implements org.apache.thrift.TBase<LongColumnStatsData, LongColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<LongColumnStatsData> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LongColumnStatsData");
+
+  private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("highValue", org.apache.thrift.protocol.TType.I64, (short)2);
+  private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField("numDVs", org.apache.thrift.protocol.TType.I64, (short)4);
+  private static final org.apache.thrift.protocol.TField BIT_VECTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("bitVectors", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new LongColumnStatsDataStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new LongColumnStatsDataTupleSchemeFactory());
+  }
+
+  private long lowValue; // optional
+  private long highValue; // optional
+  private long numNulls; // required
+  private long numDVs; // required
+  private ByteBuffer bitVectors; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    LOW_VALUE((short)1, "lowValue"),
+    HIGH_VALUE((short)2, "highValue"),
+    NUM_NULLS((short)3, "numNulls"),
+    NUM_DVS((short)4, "numDVs"),
+    BIT_VECTORS((short)5, "bitVectors");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // LOW_VALUE
+          return LOW_VALUE;
+        case 2: // HIGH_VALUE
+          return HIGH_VALUE;
+        case 3: // NUM_NULLS
+          return NUM_NULLS;
+        case 4: // NUM_DVS
+          return NUM_DVS;
+        case 5: // BIT_VECTORS
+          return BIT_VECTORS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __LOWVALUE_ISSET_ID = 0;
+  private static final int __HIGHVALUE_ISSET_ID = 1;
+  private static final int __NUMNULLS_ISSET_ID = 2;
+  private static final int __NUMDVS_ISSET_ID = 3;
+  private byte __isset_bitfield = 0;
+  private static final _Fields[] optionals = {_Fields.LOW_VALUE,_Fields.HIGH_VALUE,_Fields.BIT_VECTORS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.LOW_VALUE, new org.apache.thrift.meta_data.FieldMetaData("lowValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.HIGH_VALUE, new org.apache.thrift.meta_data.FieldMetaData("highValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.NUM_DVS, new org.apache.thrift.meta_data.FieldMetaData("numDVs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.BIT_VECTORS, new org.apache.thrift.meta_data.FieldMetaData("bitVectors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING, true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LongColumnStatsData.class, metaDataMap);
+  }
+
+  public LongColumnStatsData() {
+  }
+
+  public LongColumnStatsData(
+    long numNulls,
+    long numDVs)
+  {
+    this();
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+    this.numDVs = numDVs;
+    setNumDVsIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public LongColumnStatsData(LongColumnStatsData other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.lowValue = other.lowValue;
+    this.highValue = other.highValue;
+    this.numNulls = other.numNulls;
+    this.numDVs = other.numDVs;
+    if (other.isSetBitVectors()) {
+      this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(other.bitVectors);
+    }
+  }
+
+  public LongColumnStatsData deepCopy() {
+    return new LongColumnStatsData(this);
+  }
+
+  @Override
+  public void clear() {
+    setLowValueIsSet(false);
+    this.lowValue = 0;
+    setHighValueIsSet(false);
+    this.highValue = 0;
+    setNumNullsIsSet(false);
+    this.numNulls = 0;
+    setNumDVsIsSet(false);
+    this.numDVs = 0;
+    this.bitVectors = null;
+  }
+
+  public long getLowValue() {
+    return this.lowValue;
+  }
+
+  public void setLowValue(long lowValue) {
+    this.lowValue = lowValue;
+    setLowValueIsSet(true);
+  }
+
+  public void unsetLowValue() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOWVALUE_ISSET_ID);
+  }
+
+  /** Returns true if field lowValue is set (has been assigned a value) and false otherwise */
+  public boolean isSetLowValue() {
+    return EncodingUtils.testBit(__isset_bitfield, __LOWVALUE_ISSET_ID);
+  }
+
+  public void setLowValueIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOWVALUE_ISSET_ID, value);
+  }
+
+  public long getHighValue() {
+    return this.highValue;
+  }
+
+  public void setHighValue(long highValue) {
+    this.highValue = highValue;
+    setHighValueIsSet(true);
+  }
+
+  public void unsetHighValue() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HIGHVALUE_ISSET_ID);
+  }
+
+  /** Returns true if field highValue is set (has been assigned a value) and false otherwise */
+  public boolean isSetHighValue() {
+    return EncodingUtils.testBit(__isset_bitfield, __HIGHVALUE_ISSET_ID);
+  }
+
+  public void setHighValueIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHVALUE_ISSET_ID, value);
+  }
+
+  public long getNumNulls() {
+    return this.numNulls;
+  }
+
+  public void setNumNulls(long numNulls) {
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+  }
+
+  public void unsetNumNulls() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumNulls() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  public void setNumNullsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value);
+  }
+
+  public long getNumDVs() {
+    return this.numDVs;
+  }
+
+  public void setNumDVs(long numDVs) {
+    this.numDVs = numDVs;
+    setNumDVsIsSet(true);
+  }
+
+  public void unsetNumDVs() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMDVS_ISSET_ID);
+  }
+
+  /** Returns true if field numDVs is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumDVs() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID);
+  }
+
+  public void setNumDVsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMDVS_ISSET_ID, value);
+  }
+
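+  // Binary-field accessors: getBitVectors() right-sizes the internal buffer
+  // before exposing its backing array, while both setters store a copy of the
+  // caller's bytes so later external mutation cannot affect this struct.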
+  public byte[] getBitVectors() {
+    setBitVectors(org.apache.thrift.TBaseHelper.rightSize(bitVectors));
+    return bitVectors == null ? null : bitVectors.array();
+  }
+
+  public ByteBuffer bufferForBitVectors() {
+    return org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void setBitVectors(byte[] bitVectors) {
+    this.bitVectors = bitVectors == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(bitVectors, bitVectors.length));
+  }
+
+  public void setBitVectors(ByteBuffer bitVectors) {
+    this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void unsetBitVectors() {
+    this.bitVectors = null;
+  }
+
+  /** Returns true if field bitVectors is set (has been assigned a value) and false otherwise */
+  public boolean isSetBitVectors() {
+    return this.bitVectors != null;
+  }
+
+  public void setBitVectorsIsSet(boolean value) {
+    if (!value) {
+      this.bitVectors = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case LOW_VALUE:
+      if (value == null) {
+        unsetLowValue();
+      } else {
+        setLowValue((Long)value);
+      }
+      break;
+
+    case HIGH_VALUE:
+      if (value == null) {
+        unsetHighValue();
+      } else {
+        setHighValue((Long)value);
+      }
+      break;
+
+    case NUM_NULLS:
+      if (value == null) {
+        unsetNumNulls();
+      } else {
+        setNumNulls((Long)value);
+      }
+      break;
+
+    case NUM_DVS:
+      if (value == null) {
+        unsetNumDVs();
+      } else {
+        setNumDVs((Long)value);
+      }
+      break;
+
+    case BIT_VECTORS:
+      if (value == null) {
+        unsetBitVectors();
+      } else {
+        setBitVectors((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case LOW_VALUE:
+      return getLowValue();
+
+    case HIGH_VALUE:
+      return getHighValue();
+
+    case NUM_NULLS:
+      return getNumNulls();
+
+    case NUM_DVS:
+      return getNumDVs();
+
+    case BIT_VECTORS:
+      return getBitVectors();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case LOW_VALUE:
+      return isSetLowValue();
+    case HIGH_VALUE:
+      return isSetHighValue();
+    case NUM_NULLS:
+      return isSetNumNulls();
+    case NUM_DVS:
+      return isSetNumDVs();
+    case BIT_VECTORS:
+      return isSetBitVectors();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof LongColumnStatsData)
+      return this.equals((LongColumnStatsData)that);
+    return false;
+  }
+
+  public boolean equals(LongColumnStatsData that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_lowValue = true && this.isSetLowValue();
+    boolean that_present_lowValue = true && that.isSetLowValue();
+    if (this_present_lowValue || that_present_lowValue) {
+      if (!(this_present_lowValue && that_present_lowValue))
+        return false;
+      if (this.lowValue != that.lowValue)
+        return false;
+    }
+
+    boolean this_present_highValue = true && this.isSetHighValue();
+    boolean that_present_highValue = true && that.isSetHighValue();
+    if (this_present_highValue || that_present_highValue) {
+      if (!(this_present_highValue && that_present_highValue))
+        return false;
+      if (this.highValue != that.highValue)
+        return false;
+    }
+
+    boolean this_present_numNulls = true;
+    boolean that_present_numNulls = true;
+    if (this_present_numNulls || that_present_numNulls) {
+      if (!(this_present_numNulls && that_present_numNulls))
+        return false;
+      if (this.numNulls != that.numNulls)
+        return false;
+    }
+
+    boolean this_present_numDVs = true;
+    boolean that_present_numDVs = true;
+    if (this_present_numDVs || that_present_numDVs) {
+      if (!(this_present_numDVs && that_present_numDVs))
+        return false;
+      if (this.numDVs != that.numDVs)
+        return false;
+    }
+
+    boolean this_present_bitVectors = true && this.isSetBitVectors();
+    boolean that_present_bitVectors = true && that.isSetBitVectors();
+    if (this_present_bitVectors || that_present_bitVectors) {
+      if (!(this_present_bitVectors && that_present_bitVectors))
+        return false;
+      if (!this.bitVectors.equals(that.bitVectors))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_lowValue = true && (isSetLowValue());
+    list.add(present_lowValue);
+    if (present_lowValue)
+      list.add(lowValue);
+
+    boolean present_highValue = true && (isSetHighValue());
+    list.add(present_highValue);
+    if (present_highValue)
+      list.add(highValue);
+
+    boolean present_numNulls = true;
+    list.add(present_numNulls);
+    if (present_numNulls)
+      list.add(numNulls);
+
+    boolean present_numDVs = true;
+    list.add(present_numDVs);
+    if (present_numDVs)
+      list.add(numDVs);
+
+    boolean present_bitVectors = true && (isSetBitVectors());
+    list.add(present_bitVectors);
+    if (present_bitVectors)
+      list.add(bitVectors);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(LongColumnStatsData other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetLowValue()).compareTo(other.isSetLowValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLowValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lowValue, other.lowValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetHighValue()).compareTo(other.isSetHighValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHighValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highValue, other.highValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(other.isSetNumNulls());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumNulls()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, other.numNulls);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo(other.isSetNumDVs());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumDVs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numDVs, other.numDVs);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetBitVectors()).compareTo(other.isSetBitVectors());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetBitVectors()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bitVectors, other.bitVectors);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("LongColumnStatsData(");
+    boolean first = true;
+
+    if (isSetLowValue()) {
+      sb.append("lowValue:");
+      sb.append(this.lowValue);
+      first = false;
+    }
+    if (isSetHighValue()) {
+      if (!first) sb.append(", ");
+      sb.append("highValue:");
+      sb.append(this.highValue);
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("numNulls:");
+    sb.append(this.numNulls);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("numDVs:");
+    sb.append(this.numDVs);
+    first = false;
+    if (isSetBitVectors()) {
+      if (!first) sb.append(", ");
+      sb.append("bitVectors:");
+      if (this.bitVectors == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.bitVectors, sb);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetNumNulls()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNumDVs()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numDVs' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization bypasses the default constructor, so the isset bitfield must be reset explicitly before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class LongColumnStatsDataStandardSchemeFactory implements SchemeFactory {
+    public LongColumnStatsDataStandardScheme getScheme() {
+      return new LongColumnStatsDataStandardScheme();
+    }
+  }
+
+  private static class LongColumnStatsDataStandardScheme extends StandardScheme<LongColumnStatsData> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, LongColumnStatsData struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // LOW_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.lowValue = iprot.readI64();
+              struct.setLowValueIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // HIGH_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.highValue = iprot.readI64();
+              struct.setHighValueIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // NUM_NULLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numNulls = iprot.readI64();
+              struct.setNumNullsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // NUM_DVS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numDVs = iprot.readI64();
+              struct.setNumDVsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // BIT_VECTORS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.bitVectors = iprot.readBinary();
+              struct.setBitVectorsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, LongColumnStatsData struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.isSetLowValue()) {
+        oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC);
+        oprot.writeI64(struct.lowValue);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetHighValue()) {
+        oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC);
+        oprot.writeI64(struct.highValue);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC);
+      oprot.writeI64(struct.numNulls);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(NUM_DVS_FIELD_DESC);
+      oprot.writeI64(struct.numDVs);
+      oprot.writeFieldEnd();
+      if (struct.bitVectors != null) {
+        if (struct.isSetBitVectors()) {
+          oprot.writeFieldBegin(BIT_VECTORS_FIELD_DESC);
+          oprot.writeBinary(struct.bitVectors);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class LongColumnStatsDataTupleSchemeFactory implements SchemeFactory {
+    public LongColumnStatsDataTupleScheme getScheme() {
+      return new LongColumnStatsDataTupleScheme();
+    }
+  }
+
+  private static class LongColumnStatsDataTupleScheme extends TupleScheme<LongColumnStatsData> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, LongColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.numNulls);
+      oprot.writeI64(struct.numDVs);
+      BitSet optionals = new BitSet();
+      if (struct.isSetLowValue()) {
+        optionals.set(0);
+      }
+      if (struct.isSetHighValue()) {
+        optionals.set(1);
+      }
+      if (struct.isSetBitVectors()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetLowValue()) {
+        oprot.writeI64(struct.lowValue);
+      }
+      if (struct.isSetHighValue()) {
+        oprot.writeI64(struct.highValue);
+      }
+      if (struct.isSetBitVectors()) {
+        oprot.writeBinary(struct.bitVectors);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, LongColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.numNulls = iprot.readI64();
+      struct.setNumNullsIsSet(true);
+      struct.numDVs = iprot.readI64();
+      struct.setNumDVsIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.lowValue = iprot.readI64();
+        struct.setLowValueIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.highValue = iprot.readI64();
+        struct.setHighValueIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.bitVectors = iprot.readBinary();
+        struct.setBitVectorsIsSet(true);
+      }
+    }
+  }
+
+}
+
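
For orientation, here is a minimal usage sketch for the generated LongColumnStatsData struct. It is not part of this generated file; it assumes the standard Thrift-generated required-fields constructor and setters, plus libthrift's TSerializer/TDeserializer, and round-trips through the same TCompactProtocol that the writeObject/readObject hooks above use.

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;

    public class LongStatsRoundTrip {
      public static void main(String[] args) throws org.apache.thrift.TException {
        // numNulls and numDVs are the only required fields (see validate() above).
        LongColumnStatsData stats = new LongColumnStatsData(12L, 340L);
        stats.setLowValue(1L);      // optional: written only once its isset bit is on
        stats.setHighValue(9999L);  // optional

        // Round-trip through the compact protocol.
        byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(stats);
        LongColumnStatsData copy = new LongColumnStatsData();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);
        assert stats.equals(copy);
      }
    }

Note that lowValue, highValue, and bitVectors are skipped on the wire entirely when unset, which is why equals() and hashCode() above branch on the isSet checks.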


[89/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
index 0000000,0000000..cc6ecdf
new file mode 100644
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsRequest.java
@@@ -1,0 -1,0 +1,1067 @@@
++/**
++ * Autogenerated by Thrift Compiler (0.9.3)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.hadoop.hive.metastore.api;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import org.apache.thrift.async.AsyncMethodCallback;
++import org.apache.thrift.server.AbstractNonblockingServer.*;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import javax.annotation.Generated;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
++@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterPartitionsRequest implements org.apache.thrift.TBase<AlterPartitionsRequest, AlterPartitionsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AlterPartitionsRequest> {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterPartitionsRequest");
++
++  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
++  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
++  private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)3);
++  private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)4);
++  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
++  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)6);
++  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new AlterPartitionsRequestStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new AlterPartitionsRequestTupleSchemeFactory());
++  }
++
++  private String dbName; // required
++  private String tableName; // required
++  private List<Partition> partitions; // required
++  private EnvironmentContext environmentContext; // required
++  private long txnId; // optional
++  private long writeId; // optional
++  private String validWriteIdList; // optional
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++    DB_NAME((short)1, "dbName"),
++    TABLE_NAME((short)2, "tableName"),
++    PARTITIONS((short)3, "partitions"),
++    ENVIRONMENT_CONTEXT((short)4, "environmentContext"),
++    TXN_ID((short)5, "txnId"),
++    WRITE_ID((short)6, "writeId"),
++    VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if it's not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        case 1: // DB_NAME
++          return DB_NAME;
++        case 2: // TABLE_NAME
++          return TABLE_NAME;
++        case 3: // PARTITIONS
++          return PARTITIONS;
++        case 4: // ENVIRONMENT_CONTEXT
++          return ENVIRONMENT_CONTEXT;
++        case 5: // TXN_ID
++          return TXN_ID;
++        case 6: // WRITE_ID
++          return WRITE_ID;
++        case 7: // VALID_WRITE_ID_LIST
++          return VALID_WRITE_ID_LIST;
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if it's not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++
++  // isset id assignments
++  private static final int __TXNID_ISSET_ID = 0;
++  private static final int __WRITEID_ISSET_ID = 1;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.REQUIRED, 
++        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
++            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
++    tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environmentContext", org.apache.thrift.TFieldRequirementType.REQUIRED, 
++        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class)));
++    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterPartitionsRequest.class, metaDataMap);
++  }
++
++  public AlterPartitionsRequest() {
++    this.txnId = -1L;
++
++    this.writeId = -1L;
++
++  }
++
++  public AlterPartitionsRequest(
++    String dbName,
++    String tableName,
++    List<Partition> partitions,
++    EnvironmentContext environmentContext)
++  {
++    this();
++    this.dbName = dbName;
++    this.tableName = tableName;
++    this.partitions = partitions;
++    this.environmentContext = environmentContext;
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public AlterPartitionsRequest(AlterPartitionsRequest other) {
++    __isset_bitfield = other.__isset_bitfield;
++    if (other.isSetDbName()) {
++      this.dbName = other.dbName;
++    }
++    if (other.isSetTableName()) {
++      this.tableName = other.tableName;
++    }
++    if (other.isSetPartitions()) {
++      List<Partition> __this__partitions = new ArrayList<Partition>(other.partitions.size());
++      for (Partition other_element : other.partitions) {
++        __this__partitions.add(new Partition(other_element));
++      }
++      this.partitions = __this__partitions;
++    }
++    if (other.isSetEnvironmentContext()) {
++      this.environmentContext = new EnvironmentContext(other.environmentContext);
++    }
++    this.txnId = other.txnId;
++    this.writeId = other.writeId;
++    if (other.isSetValidWriteIdList()) {
++      this.validWriteIdList = other.validWriteIdList;
++    }
++  }
++
++  public AlterPartitionsRequest deepCopy() {
++    return new AlterPartitionsRequest(this);
++  }
++
++  @Override
++  public void clear() {
++    this.dbName = null;
++    this.tableName = null;
++    this.partitions = null;
++    this.environmentContext = null;
++    this.txnId = -1L;
++
++    this.writeId = -1L;
++
++    this.validWriteIdList = null;
++  }
++
++  public String getDbName() {
++    return this.dbName;
++  }
++
++  public void setDbName(String dbName) {
++    this.dbName = dbName;
++  }
++
++  public void unsetDbName() {
++    this.dbName = null;
++  }
++
++  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
++  public boolean isSetDbName() {
++    return this.dbName != null;
++  }
++
++  public void setDbNameIsSet(boolean value) {
++    if (!value) {
++      this.dbName = null;
++    }
++  }
++
++  public String getTableName() {
++    return this.tableName;
++  }
++
++  public void setTableName(String tableName) {
++    this.tableName = tableName;
++  }
++
++  public void unsetTableName() {
++    this.tableName = null;
++  }
++
++  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
++  public boolean isSetTableName() {
++    return this.tableName != null;
++  }
++
++  public void setTableNameIsSet(boolean value) {
++    if (!value) {
++      this.tableName = null;
++    }
++  }
++
++  public int getPartitionsSize() {
++    return (this.partitions == null) ? 0 : this.partitions.size();
++  }
++
++  public java.util.Iterator<Partition> getPartitionsIterator() {
++    return (this.partitions == null) ? null : this.partitions.iterator();
++  }
++
++  public void addToPartitions(Partition elem) {
++    if (this.partitions == null) {
++      this.partitions = new ArrayList<Partition>();
++    }
++    this.partitions.add(elem);
++  }
++
++  public List<Partition> getPartitions() {
++    return this.partitions;
++  }
++
++  public void setPartitions(List<Partition> partitions) {
++    this.partitions = partitions;
++  }
++
++  public void unsetPartitions() {
++    this.partitions = null;
++  }
++
++  /** Returns true if field partitions is set (has been assigned a value) and false otherwise */
++  public boolean isSetPartitions() {
++    return this.partitions != null;
++  }
++
++  public void setPartitionsIsSet(boolean value) {
++    if (!value) {
++      this.partitions = null;
++    }
++  }
++
++  public EnvironmentContext getEnvironmentContext() {
++    return this.environmentContext;
++  }
++
++  public void setEnvironmentContext(EnvironmentContext environmentContext) {
++    this.environmentContext = environmentContext;
++  }
++
++  public void unsetEnvironmentContext() {
++    this.environmentContext = null;
++  }
++
++  /** Returns true if field environmentContext is set (has been assigned a value) and false otherwise */
++  public boolean isSetEnvironmentContext() {
++    return this.environmentContext != null;
++  }
++
++  public void setEnvironmentContextIsSet(boolean value) {
++    if (!value) {
++      this.environmentContext = null;
++    }
++  }
++
++  public long getTxnId() {
++    return this.txnId;
++  }
++
++  public void setTxnId(long txnId) {
++    this.txnId = txnId;
++    setTxnIdIsSet(true);
++  }
++
++  public void unsetTxnId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
++  public boolean isSetTxnId() {
++    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  public void setTxnIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
++  }
++
++  public long getWriteId() {
++    return this.writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++    setWriteIdIsSet(true);
++  }
++
++  public void unsetWriteId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
++  public boolean isSetWriteId() {
++    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  public void setWriteIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
++  }
++
++  public String getValidWriteIdList() {
++    return this.validWriteIdList;
++  }
++
++  public void setValidWriteIdList(String validWriteIdList) {
++    this.validWriteIdList = validWriteIdList;
++  }
++
++  public void unsetValidWriteIdList() {
++    this.validWriteIdList = null;
++  }
++
++  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
++  public boolean isSetValidWriteIdList() {
++    return this.validWriteIdList != null;
++  }
++
++  public void setValidWriteIdListIsSet(boolean value) {
++    if (!value) {
++      this.validWriteIdList = null;
++    }
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    case DB_NAME:
++      if (value == null) {
++        unsetDbName();
++      } else {
++        setDbName((String)value);
++      }
++      break;
++
++    case TABLE_NAME:
++      if (value == null) {
++        unsetTableName();
++      } else {
++        setTableName((String)value);
++      }
++      break;
++
++    case PARTITIONS:
++      if (value == null) {
++        unsetPartitions();
++      } else {
++        setPartitions((List<Partition>)value);
++      }
++      break;
++
++    case ENVIRONMENT_CONTEXT:
++      if (value == null) {
++        unsetEnvironmentContext();
++      } else {
++        setEnvironmentContext((EnvironmentContext)value);
++      }
++      break;
++
++    case TXN_ID:
++      if (value == null) {
++        unsetTxnId();
++      } else {
++        setTxnId((Long)value);
++      }
++      break;
++
++    case WRITE_ID:
++      if (value == null) {
++        unsetWriteId();
++      } else {
++        setWriteId((Long)value);
++      }
++      break;
++
++    case VALID_WRITE_ID_LIST:
++      if (value == null) {
++        unsetValidWriteIdList();
++      } else {
++        setValidWriteIdList((String)value);
++      }
++      break;
++
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    case DB_NAME:
++      return getDbName();
++
++    case TABLE_NAME:
++      return getTableName();
++
++    case PARTITIONS:
++      return getPartitions();
++
++    case ENVIRONMENT_CONTEXT:
++      return getEnvironmentContext();
++
++    case TXN_ID:
++      return getTxnId();
++
++    case WRITE_ID:
++      return getWriteId();
++
++    case VALID_WRITE_ID_LIST:
++      return getValidWriteIdList();
++
++    }
++    throw new IllegalStateException();
++  }
++
++  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++
++    switch (field) {
++    case DB_NAME:
++      return isSetDbName();
++    case TABLE_NAME:
++      return isSetTableName();
++    case PARTITIONS:
++      return isSetPartitions();
++    case ENVIRONMENT_CONTEXT:
++      return isSetEnvironmentContext();
++    case TXN_ID:
++      return isSetTxnId();
++    case WRITE_ID:
++      return isSetWriteId();
++    case VALID_WRITE_ID_LIST:
++      return isSetValidWriteIdList();
++    }
++    throw new IllegalStateException();
++  }
++
++  @Override
++  public boolean equals(Object that) {
++    if (that == null)
++      return false;
++    if (that instanceof AlterPartitionsRequest)
++      return this.equals((AlterPartitionsRequest)that);
++    return false;
++  }
++
++  public boolean equals(AlterPartitionsRequest that) {
++    if (that == null)
++      return false;
++
++    boolean this_present_dbName = true && this.isSetDbName();
++    boolean that_present_dbName = true && that.isSetDbName();
++    if (this_present_dbName || that_present_dbName) {
++      if (!(this_present_dbName && that_present_dbName))
++        return false;
++      if (!this.dbName.equals(that.dbName))
++        return false;
++    }
++
++    boolean this_present_tableName = true && this.isSetTableName();
++    boolean that_present_tableName = true && that.isSetTableName();
++    if (this_present_tableName || that_present_tableName) {
++      if (!(this_present_tableName && that_present_tableName))
++        return false;
++      if (!this.tableName.equals(that.tableName))
++        return false;
++    }
++
++    boolean this_present_partitions = true && this.isSetPartitions();
++    boolean that_present_partitions = true && that.isSetPartitions();
++    if (this_present_partitions || that_present_partitions) {
++      if (!(this_present_partitions && that_present_partitions))
++        return false;
++      if (!this.partitions.equals(that.partitions))
++        return false;
++    }
++
++    boolean this_present_environmentContext = true && this.isSetEnvironmentContext();
++    boolean that_present_environmentContext = true && that.isSetEnvironmentContext();
++    if (this_present_environmentContext || that_present_environmentContext) {
++      if (!(this_present_environmentContext && that_present_environmentContext))
++        return false;
++      if (!this.environmentContext.equals(that.environmentContext))
++        return false;
++    }
++
++    boolean this_present_txnId = true && this.isSetTxnId();
++    boolean that_present_txnId = true && that.isSetTxnId();
++    if (this_present_txnId || that_present_txnId) {
++      if (!(this_present_txnId && that_present_txnId))
++        return false;
++      if (this.txnId != that.txnId)
++        return false;
++    }
++
++    boolean this_present_writeId = true && this.isSetWriteId();
++    boolean that_present_writeId = true && that.isSetWriteId();
++    if (this_present_writeId || that_present_writeId) {
++      if (!(this_present_writeId && that_present_writeId))
++        return false;
++      if (this.writeId != that.writeId)
++        return false;
++    }
++
++    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
++    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
++    if (this_present_validWriteIdList || that_present_validWriteIdList) {
++      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
++        return false;
++      if (!this.validWriteIdList.equals(that.validWriteIdList))
++        return false;
++    }
++
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    List<Object> list = new ArrayList<Object>();
++
++    boolean present_dbName = true && (isSetDbName());
++    list.add(present_dbName);
++    if (present_dbName)
++      list.add(dbName);
++
++    boolean present_tableName = true && (isSetTableName());
++    list.add(present_tableName);
++    if (present_tableName)
++      list.add(tableName);
++
++    boolean present_partitions = true && (isSetPartitions());
++    list.add(present_partitions);
++    if (present_partitions)
++      list.add(partitions);
++
++    boolean present_environmentContext = true && (isSetEnvironmentContext());
++    list.add(present_environmentContext);
++    if (present_environmentContext)
++      list.add(environmentContext);
++
++    boolean present_txnId = true && (isSetTxnId());
++    list.add(present_txnId);
++    if (present_txnId)
++      list.add(txnId);
++
++    boolean present_writeId = true && (isSetWriteId());
++    list.add(present_writeId);
++    if (present_writeId)
++      list.add(writeId);
++
++    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
++    list.add(present_validWriteIdList);
++    if (present_validWriteIdList)
++      list.add(validWriteIdList);
++
++    return list.hashCode();
++  }
++
++  @Override
++  public int compareTo(AlterPartitionsRequest other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int lastComparison = 0;
++
++    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetDbName()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTableName()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetPartitions()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetEnvironmentContext()).compareTo(other.isSetEnvironmentContext());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetEnvironmentContext()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environmentContext, other.environmentContext);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTxnId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetWriteId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetValidWriteIdList()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    return 0;
++  }
++
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("AlterPartitionsRequest(");
++    boolean first = true;
++
++    sb.append("dbName:");
++    if (this.dbName == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.dbName);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("tableName:");
++    if (this.tableName == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.tableName);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("partitions:");
++    if (this.partitions == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.partitions);
++    }
++    first = false;
++    if (!first) sb.append(", ");
++    sb.append("environmentContext:");
++    if (this.environmentContext == null) {
++      sb.append("null");
++    } else {
++      sb.append(this.environmentContext);
++    }
++    first = false;
++    if (isSetTxnId()) {
++      if (!first) sb.append(", ");
++      sb.append("txnId:");
++      sb.append(this.txnId);
++      first = false;
++    }
++    if (isSetWriteId()) {
++      if (!first) sb.append(", ");
++      sb.append("writeId:");
++      sb.append(this.writeId);
++      first = false;
++    }
++    if (isSetValidWriteIdList()) {
++      if (!first) sb.append(", ");
++      sb.append("validWriteIdList:");
++      if (this.validWriteIdList == null) {
++        sb.append("null");
++      } else {
++        sb.append(this.validWriteIdList);
++      }
++      first = false;
++    }
++    sb.append(")");
++    return sb.toString();
++  }
++
++  public void validate() throws org.apache.thrift.TException {
++    // check for required fields
++    if (!isSetDbName()) {
++      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
++    }
++
++    if (!isSetTableName()) {
++      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString());
++    }
++
++    if (!isSetPartitions()) {
++      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partitions' is unset! Struct:" + toString());
++    }
++
++    if (!isSetEnvironmentContext()) {
++      throw new org.apache.thrift.protocol.TProtocolException("Required field 'environmentContext' is unset! Struct:" + toString());
++    }
++
++    // check for sub-struct validity
++    if (environmentContext != null) {
++      environmentContext.validate();
++    }
++  }
++
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      // Java serialization bypasses the default constructor, so the isset bitfield must be reset explicitly before reading.
++      __isset_bitfield = 0;
++      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private static class AlterPartitionsRequestStandardSchemeFactory implements SchemeFactory {
++    public AlterPartitionsRequestStandardScheme getScheme() {
++      return new AlterPartitionsRequestStandardScheme();
++    }
++  }
++
++  private static class AlterPartitionsRequestStandardScheme extends StandardScheme<AlterPartitionsRequest> {
++
++    public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          case 1: // DB_NAME
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.dbName = iprot.readString();
++              struct.setDbNameIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 2: // TABLE_NAME
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.tableName = iprot.readString();
++              struct.setTableNameIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 3: // PARTITIONS
++            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
++              {
++                org.apache.thrift.protocol.TList _list952 = iprot.readListBegin();
++                struct.partitions = new ArrayList<Partition>(_list952.size);
++                Partition _elem953;
++                for (int _i954 = 0; _i954 < _list952.size; ++_i954)
++                {
++                  _elem953 = new Partition();
++                  _elem953.read(iprot);
++                  struct.partitions.add(_elem953);
++                }
++                iprot.readListEnd();
++              }
++              struct.setPartitionsIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 4: // ENVIRONMENT_CONTEXT
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
++              struct.environmentContext = new EnvironmentContext();
++              struct.environmentContext.read(iprot);
++              struct.setEnvironmentContextIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 5: // TXN_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.txnId = iprot.readI64();
++              struct.setTxnIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 6: // WRITE_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.writeId = iprot.readI64();
++              struct.setWriteIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 7: // VALID_WRITE_ID_LIST
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.validWriteIdList = iprot.readString();
++              struct.setValidWriteIdListIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          default:
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++      struct.validate();
++    }
++
++    public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      if (struct.dbName != null) {
++        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
++        oprot.writeString(struct.dbName);
++        oprot.writeFieldEnd();
++      }
++      if (struct.tableName != null) {
++        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
++        oprot.writeString(struct.tableName);
++        oprot.writeFieldEnd();
++      }
++      if (struct.partitions != null) {
++        oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
++        {
++          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
++          for (Partition _iter955 : struct.partitions)
++          {
++            _iter955.write(oprot);
++          }
++          oprot.writeListEnd();
++        }
++        oprot.writeFieldEnd();
++      }
++      if (struct.environmentContext != null) {
++        oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC);
++        struct.environmentContext.write(oprot);
++        oprot.writeFieldEnd();
++      }
++      if (struct.isSetTxnId()) {
++        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
++        oprot.writeI64(struct.txnId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.isSetWriteId()) {
++        oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
++        oprot.writeI64(struct.writeId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.validWriteIdList != null) {
++        if (struct.isSetValidWriteIdList()) {
++          oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
++          oprot.writeString(struct.validWriteIdList);
++          oprot.writeFieldEnd();
++        }
++      }
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  private static class AlterPartitionsRequestTupleSchemeFactory implements SchemeFactory {
++    public AlterPartitionsRequestTupleScheme getScheme() {
++      return new AlterPartitionsRequestTupleScheme();
++    }
++  }
++
++  private static class AlterPartitionsRequestTupleScheme extends TupleScheme<AlterPartitionsRequest> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++      oprot.writeString(struct.dbName);
++      oprot.writeString(struct.tableName);
++      {
++        oprot.writeI32(struct.partitions.size());
++        for (Partition _iter956 : struct.partitions)
++        {
++          _iter956.write(oprot);
++        }
++      }
++      struct.environmentContext.write(oprot);
++      BitSet optionals = new BitSet();
++      if (struct.isSetTxnId()) {
++        optionals.set(0);
++      }
++      if (struct.isSetWriteId()) {
++        optionals.set(1);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        optionals.set(2);
++      }
++      oprot.writeBitSet(optionals, 3);
++      if (struct.isSetTxnId()) {
++        oprot.writeI64(struct.txnId);
++      }
++      if (struct.isSetWriteId()) {
++        oprot.writeI64(struct.writeId);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        oprot.writeString(struct.validWriteIdList);
++      }
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsRequest struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++      struct.dbName = iprot.readString();
++      struct.setDbNameIsSet(true);
++      struct.tableName = iprot.readString();
++      struct.setTableNameIsSet(true);
++      {
++        org.apache.thrift.protocol.TList _list957 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
++        struct.partitions = new ArrayList<Partition>(_list957.size);
++        Partition _elem958;
++        for (int _i959 = 0; _i959 < _list957.size; ++_i959)
++        {
++          _elem958 = new Partition();
++          _elem958.read(iprot);
++          struct.partitions.add(_elem958);
++        }
++      }
++      struct.setPartitionsIsSet(true);
++      struct.environmentContext = new EnvironmentContext();
++      struct.environmentContext.read(iprot);
++      struct.setEnvironmentContextIsSet(true);
++      BitSet incoming = iprot.readBitSet(3);
++      if (incoming.get(0)) {
++        struct.txnId = iprot.readI64();
++        struct.setTxnIdIsSet(true);
++      }
++      if (incoming.get(1)) {
++        struct.writeId = iprot.readI64();
++        struct.setWriteIdIsSet(true);
++      }
++      if (incoming.get(2)) {
++        struct.validWriteIdList = iprot.readString();
++        struct.setValidWriteIdListIsSet(true);
++      }
++    }
++  }
++
++}
++
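
As an illustrative sketch (not part of the commit), this is how a caller might build the generated AlterPartitionsRequest. The four-argument convenience constructor and the setters are taken from the generated code above; the database name, table name, and field values are hypothetical placeholders.

    import java.util.Collections;
    import org.apache.hadoop.hive.metastore.api.AlterPartitionsRequest;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
    import org.apache.hadoop.hive.metastore.api.Partition;

    // The four required fields come in through the convenience constructor;
    // txnId and writeId default to -1 and stay off the wire until set.
    AlterPartitionsRequest req = new AlterPartitionsRequest(
        "default",                                  // hypothetical database
        "sales",                                    // hypothetical table
        Collections.singletonList(new Partition()), // placeholder partition
        new EnvironmentContext());
    req.setTxnId(42L);   // flips the __TXNID_ISSET_ID bit, so the field is serialized
    req.setWriteId(7L);
    req.validate();      // throws TProtocolException if a required field is unset

Because txnId, writeId, and validWriteIdList are optional, older clients that never call the setters produce requests that omit those fields, which is what keeps this struct wire-compatible across the transactional-stats changes.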

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java
index 0000000,0000000..8e03462
new file mode 100644
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterPartitionsResponse.java
@@@ -1,0 -1,0 +1,283 @@@
++/**
++ * Autogenerated by Thrift Compiler (0.9.3)
++ *
++ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
++ *  @generated
++ */
++package org.apache.hadoop.hive.metastore.api;
++
++import org.apache.thrift.scheme.IScheme;
++import org.apache.thrift.scheme.SchemeFactory;
++import org.apache.thrift.scheme.StandardScheme;
++
++import org.apache.thrift.scheme.TupleScheme;
++import org.apache.thrift.protocol.TTupleProtocol;
++import org.apache.thrift.protocol.TProtocolException;
++import org.apache.thrift.EncodingUtils;
++import org.apache.thrift.TException;
++import org.apache.thrift.async.AsyncMethodCallback;
++import org.apache.thrift.server.AbstractNonblockingServer.*;
++import java.util.List;
++import java.util.ArrayList;
++import java.util.Map;
++import java.util.HashMap;
++import java.util.EnumMap;
++import java.util.Set;
++import java.util.HashSet;
++import java.util.EnumSet;
++import java.util.Collections;
++import java.util.BitSet;
++import java.nio.ByteBuffer;
++import java.util.Arrays;
++import javax.annotation.Generated;
++import org.slf4j.Logger;
++import org.slf4j.LoggerFactory;
++
++@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
++@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
++@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterPartitionsResponse implements org.apache.thrift.TBase<AlterPartitionsResponse, AlterPartitionsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<AlterPartitionsResponse> {
++  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterPartitionsResponse");
++
++
++  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
++  static {
++    schemes.put(StandardScheme.class, new AlterPartitionsResponseStandardSchemeFactory());
++    schemes.put(TupleScheme.class, new AlterPartitionsResponseTupleSchemeFactory());
++  }
++
++
++  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
++  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
++;
++
++    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
++
++    static {
++      for (_Fields field : EnumSet.allOf(_Fields.class)) {
++        byName.put(field.getFieldName(), field);
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, or null if it's not found.
++     */
++    public static _Fields findByThriftId(int fieldId) {
++      switch(fieldId) {
++        default:
++          return null;
++      }
++    }
++
++    /**
++     * Find the _Fields constant that matches fieldId, throwing an exception
++     * if it is not found.
++     */
++    public static _Fields findByThriftIdOrThrow(int fieldId) {
++      _Fields fields = findByThriftId(fieldId);
++      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
++      return fields;
++    }
++
++    /**
++     * Find the _Fields constant that matches name, or null if it's not found.
++     */
++    public static _Fields findByName(String name) {
++      return byName.get(name);
++    }
++
++    private final short _thriftId;
++    private final String _fieldName;
++
++    _Fields(short thriftId, String fieldName) {
++      _thriftId = thriftId;
++      _fieldName = fieldName;
++    }
++
++    public short getThriftFieldId() {
++      return _thriftId;
++    }
++
++    public String getFieldName() {
++      return _fieldName;
++    }
++  }
++  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
++  static {
++    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
++    metaDataMap = Collections.unmodifiableMap(tmpMap);
++    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterPartitionsResponse.class, metaDataMap);
++  }
++
++  public AlterPartitionsResponse() {
++  }
++
++  /**
++   * Performs a deep copy on <i>other</i>.
++   */
++  public AlterPartitionsResponse(AlterPartitionsResponse other) {
++  }
++
++  public AlterPartitionsResponse deepCopy() {
++    return new AlterPartitionsResponse(this);
++  }
++
++  @Override
++  public void clear() {
++  }
++
++  public void setFieldValue(_Fields field, Object value) {
++    switch (field) {
++    }
++  }
++
++  public Object getFieldValue(_Fields field) {
++    switch (field) {
++    }
++    throw new IllegalStateException();
++  }
++
++  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
++  public boolean isSet(_Fields field) {
++    if (field == null) {
++      throw new IllegalArgumentException();
++    }
++
++    switch (field) {
++    }
++    throw new IllegalStateException();
++  }
++
++  @Override
++  public boolean equals(Object that) {
++    if (that == null)
++      return false;
++    if (that instanceof AlterPartitionsResponse)
++      return this.equals((AlterPartitionsResponse)that);
++    return false;
++  }
++
++  public boolean equals(AlterPartitionsResponse that) {
++    if (that == null)
++      return false;
++
++    return true;
++  }
++
++  @Override
++  public int hashCode() {
++    List<Object> list = new ArrayList<Object>();
++
++    return list.hashCode();
++  }
++
++  @Override
++  public int compareTo(AlterPartitionsResponse other) {
++    if (!getClass().equals(other.getClass())) {
++      return getClass().getName().compareTo(other.getClass().getName());
++    }
++
++    int lastComparison = 0;
++
++    return 0;
++  }
++
++  public _Fields fieldForId(int fieldId) {
++    return _Fields.findByThriftId(fieldId);
++  }
++
++  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
++    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
++  }
++
++  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
++    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
++  }
++
++  @Override
++  public String toString() {
++    StringBuilder sb = new StringBuilder("AlterPartitionsResponse(");
++    boolean first = true;
++
++    sb.append(")");
++    return sb.toString();
++  }
++
++  public void validate() throws org.apache.thrift.TException {
++    // check for required fields
++    // check for sub-struct validity
++  }
++
++  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
++    try {
++      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
++    try {
++      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
++    } catch (org.apache.thrift.TException te) {
++      throw new java.io.IOException(te);
++    }
++  }
++
++  private static class AlterPartitionsResponseStandardSchemeFactory implements SchemeFactory {
++    public AlterPartitionsResponseStandardScheme getScheme() {
++      return new AlterPartitionsResponseStandardScheme();
++    }
++  }
++
++  private static class AlterPartitionsResponseStandardScheme extends StandardScheme<AlterPartitionsResponse> {
++
++    public void read(org.apache.thrift.protocol.TProtocol iprot, AlterPartitionsResponse struct) throws org.apache.thrift.TException {
++      org.apache.thrift.protocol.TField schemeField;
++      iprot.readStructBegin();
++      while (true)
++      {
++        schemeField = iprot.readFieldBegin();
++        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
++          break;
++        }
++        switch (schemeField.id) {
++          default:
++            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++        }
++        iprot.readFieldEnd();
++      }
++      iprot.readStructEnd();
++      struct.validate();
++    }
++
++    public void write(org.apache.thrift.protocol.TProtocol oprot, AlterPartitionsResponse struct) throws org.apache.thrift.TException {
++      struct.validate();
++
++      oprot.writeStructBegin(STRUCT_DESC);
++      oprot.writeFieldStop();
++      oprot.writeStructEnd();
++    }
++
++  }
++
++  private static class AlterPartitionsResponseTupleSchemeFactory implements SchemeFactory {
++    public AlterPartitionsResponseTupleScheme getScheme() {
++      return new AlterPartitionsResponseTupleScheme();
++    }
++  }
++
++  private static class AlterPartitionsResponseTupleScheme extends TupleScheme<AlterPartitionsResponse> {
++
++    @Override
++    public void write(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsResponse struct) throws org.apache.thrift.TException {
++      TTupleProtocol oprot = (TTupleProtocol) prot;
++    }
++
++    @Override
++    public void read(org.apache.thrift.protocol.TProtocol prot, AlterPartitionsResponse struct) throws org.apache.thrift.TException {
++      TTupleProtocol iprot = (TTupleProtocol) prot;
++    }
++  }
++
++}
++
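
As a minimal sketch (not from the commit itself) of how a field-less response
struct like the AlterPartitionsResponse above behaves: with no fields, write()
emits only the struct-begin and field-stop markers, and read() skips any
unknown fields until it hits the stop byte. TMemoryBuffer is the standard
in-memory libthrift transport; everything else comes from the generated class.

    import org.apache.hadoop.hive.metastore.api.AlterPartitionsResponse;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class EmptyStructRoundTrip {
      public static void main(String[] args) throws Exception {
        TMemoryBuffer transport = new TMemoryBuffer(64);   // shared in-memory buffer
        TCompactProtocol protocol = new TCompactProtocol(transport);

        new AlterPartitionsResponse().write(protocol);     // writes only the stop marker

        AlterPartitionsResponse copy = new AlterPartitionsResponse();
        copy.read(protocol);                               // reads back from the same buffer
        System.out.println(copy);                          // prints "AlterPartitionsResponse()"
      }
    }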

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
index 0000000,6ce7214..87dc3f1
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
@@@ -1,0 -1,549 +1,863 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ColumnStatistics implements org.apache.thrift.TBase<ColumnStatistics, ColumnStatistics._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnStatistics> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatistics");
+ 
+   private static final org.apache.thrift.protocol.TField STATS_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("statsDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+   private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("statsObj", org.apache.thrift.protocol.TType.LIST, (short)2);
++  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3);
++  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)4);
++  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)5);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new ColumnStatisticsStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new ColumnStatisticsTupleSchemeFactory());
+   }
+ 
+   private ColumnStatisticsDesc statsDesc; // required
+   private List<ColumnStatisticsObj> statsObj; // required
++  private long txnId; // optional
++  private String validWriteIdList; // optional
++  private boolean isStatsCompliant; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     STATS_DESC((short)1, "statsDesc"),
 -    STATS_OBJ((short)2, "statsObj");
++    STATS_OBJ((short)2, "statsObj"),
++    TXN_ID((short)3, "txnId"),
++    VALID_WRITE_ID_LIST((short)4, "validWriteIdList"),
++    IS_STATS_COMPLIANT((short)5, "isStatsCompliant");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if its not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // STATS_DESC
+           return STATS_DESC;
+         case 2: // STATS_OBJ
+           return STATS_OBJ;
++        case 3: // TXN_ID
++          return TXN_ID;
++        case 4: // VALID_WRITE_ID_LIST
++          return VALID_WRITE_ID_LIST;
++        case 5: // IS_STATS_COMPLIANT
++          return IS_STATS_COMPLIANT;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if its not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
++  private static final int __TXNID_ISSET_ID = 0;
++  private static final int __ISSTATSCOMPLIANT_ISSET_ID = 1;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST,_Fields.IS_STATS_COMPLIANT};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.STATS_DESC, new org.apache.thrift.meta_data.FieldMetaData("statsDesc", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsDesc.class)));
+     tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("statsObj", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
++    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatistics.class, metaDataMap);
+   }
+ 
+   public ColumnStatistics() {
++    this.txnId = -1L;
++
+   }
+ 
+   public ColumnStatistics(
+     ColumnStatisticsDesc statsDesc,
+     List<ColumnStatisticsObj> statsObj)
+   {
+     this();
+     this.statsDesc = statsDesc;
+     this.statsObj = statsObj;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public ColumnStatistics(ColumnStatistics other) {
++    __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetStatsDesc()) {
+       this.statsDesc = new ColumnStatisticsDesc(other.statsDesc);
+     }
+     if (other.isSetStatsObj()) {
+       List<ColumnStatisticsObj> __this__statsObj = new ArrayList<ColumnStatisticsObj>(other.statsObj.size());
+       for (ColumnStatisticsObj other_element : other.statsObj) {
+         __this__statsObj.add(new ColumnStatisticsObj(other_element));
+       }
+       this.statsObj = __this__statsObj;
+     }
++    this.txnId = other.txnId;
++    if (other.isSetValidWriteIdList()) {
++      this.validWriteIdList = other.validWriteIdList;
++    }
++    this.isStatsCompliant = other.isStatsCompliant;
+   }
+ 
+   public ColumnStatistics deepCopy() {
+     return new ColumnStatistics(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.statsDesc = null;
+     this.statsObj = null;
++    this.txnId = -1L;
++
++    this.validWriteIdList = null;
++    setIsStatsCompliantIsSet(false);
++    this.isStatsCompliant = false;
+   }
+ 
+   public ColumnStatisticsDesc getStatsDesc() {
+     return this.statsDesc;
+   }
+ 
+   public void setStatsDesc(ColumnStatisticsDesc statsDesc) {
+     this.statsDesc = statsDesc;
+   }
+ 
+   public void unsetStatsDesc() {
+     this.statsDesc = null;
+   }
+ 
+   /** Returns true if field statsDesc is set (has been assigned a value) and false otherwise */
+   public boolean isSetStatsDesc() {
+     return this.statsDesc != null;
+   }
+ 
+   public void setStatsDescIsSet(boolean value) {
+     if (!value) {
+       this.statsDesc = null;
+     }
+   }
+ 
+   public int getStatsObjSize() {
+     return (this.statsObj == null) ? 0 : this.statsObj.size();
+   }
+ 
+   public java.util.Iterator<ColumnStatisticsObj> getStatsObjIterator() {
+     return (this.statsObj == null) ? null : this.statsObj.iterator();
+   }
+ 
+   public void addToStatsObj(ColumnStatisticsObj elem) {
+     if (this.statsObj == null) {
+       this.statsObj = new ArrayList<ColumnStatisticsObj>();
+     }
+     this.statsObj.add(elem);
+   }
+ 
+   public List<ColumnStatisticsObj> getStatsObj() {
+     return this.statsObj;
+   }
+ 
+   public void setStatsObj(List<ColumnStatisticsObj> statsObj) {
+     this.statsObj = statsObj;
+   }
+ 
+   public void unsetStatsObj() {
+     this.statsObj = null;
+   }
+ 
+   /** Returns true if field statsObj is set (has been assigned a value) and false otherwise */
+   public boolean isSetStatsObj() {
+     return this.statsObj != null;
+   }
+ 
+   public void setStatsObjIsSet(boolean value) {
+     if (!value) {
+       this.statsObj = null;
+     }
+   }
+ 
++  public long getTxnId() {
++    return this.txnId;
++  }
++
++  public void setTxnId(long txnId) {
++    this.txnId = txnId;
++    setTxnIdIsSet(true);
++  }
++
++  public void unsetTxnId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
++  public boolean isSetTxnId() {
++    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  public void setTxnIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
++  }
++
++  public String getValidWriteIdList() {
++    return this.validWriteIdList;
++  }
++
++  public void setValidWriteIdList(String validWriteIdList) {
++    this.validWriteIdList = validWriteIdList;
++  }
++
++  public void unsetValidWriteIdList() {
++    this.validWriteIdList = null;
++  }
++
++  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
++  public boolean isSetValidWriteIdList() {
++    return this.validWriteIdList != null;
++  }
++
++  public void setValidWriteIdListIsSet(boolean value) {
++    if (!value) {
++      this.validWriteIdList = null;
++    }
++  }
++
++  public boolean isIsStatsCompliant() {
++    return this.isStatsCompliant;
++  }
++
++  public void setIsStatsCompliant(boolean isStatsCompliant) {
++    this.isStatsCompliant = isStatsCompliant;
++    setIsStatsCompliantIsSet(true);
++  }
++
++  public void unsetIsStatsCompliant() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
++  public boolean isSetIsStatsCompliant() {
++    return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  public void setIsStatsCompliantIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case STATS_DESC:
+       if (value == null) {
+         unsetStatsDesc();
+       } else {
+         setStatsDesc((ColumnStatisticsDesc)value);
+       }
+       break;
+ 
+     case STATS_OBJ:
+       if (value == null) {
+         unsetStatsObj();
+       } else {
+         setStatsObj((List<ColumnStatisticsObj>)value);
+       }
+       break;
+ 
++    case TXN_ID:
++      if (value == null) {
++        unsetTxnId();
++      } else {
++        setTxnId((Long)value);
++      }
++      break;
++
++    case VALID_WRITE_ID_LIST:
++      if (value == null) {
++        unsetValidWriteIdList();
++      } else {
++        setValidWriteIdList((String)value);
++      }
++      break;
++
++    case IS_STATS_COMPLIANT:
++      if (value == null) {
++        unsetIsStatsCompliant();
++      } else {
++        setIsStatsCompliant((Boolean)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case STATS_DESC:
+       return getStatsDesc();
+ 
+     case STATS_OBJ:
+       return getStatsObj();
+ 
++    case TXN_ID:
++      return getTxnId();
++
++    case VALID_WRITE_ID_LIST:
++      return getValidWriteIdList();
++
++    case IS_STATS_COMPLIANT:
++      return isIsStatsCompliant();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case STATS_DESC:
+       return isSetStatsDesc();
+     case STATS_OBJ:
+       return isSetStatsObj();
++    case TXN_ID:
++      return isSetTxnId();
++    case VALID_WRITE_ID_LIST:
++      return isSetValidWriteIdList();
++    case IS_STATS_COMPLIANT:
++      return isSetIsStatsCompliant();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof ColumnStatistics)
+       return this.equals((ColumnStatistics)that);
+     return false;
+   }
+ 
+   public boolean equals(ColumnStatistics that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_statsDesc = true && this.isSetStatsDesc();
+     boolean that_present_statsDesc = true && that.isSetStatsDesc();
+     if (this_present_statsDesc || that_present_statsDesc) {
+       if (!(this_present_statsDesc && that_present_statsDesc))
+         return false;
+       if (!this.statsDesc.equals(that.statsDesc))
+         return false;
+     }
+ 
+     boolean this_present_statsObj = true && this.isSetStatsObj();
+     boolean that_present_statsObj = true && that.isSetStatsObj();
+     if (this_present_statsObj || that_present_statsObj) {
+       if (!(this_present_statsObj && that_present_statsObj))
+         return false;
+       if (!this.statsObj.equals(that.statsObj))
+         return false;
+     }
+ 
++    boolean this_present_txnId = true && this.isSetTxnId();
++    boolean that_present_txnId = true && that.isSetTxnId();
++    if (this_present_txnId || that_present_txnId) {
++      if (!(this_present_txnId && that_present_txnId))
++        return false;
++      if (this.txnId != that.txnId)
++        return false;
++    }
++
++    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
++    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
++    if (this_present_validWriteIdList || that_present_validWriteIdList) {
++      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
++        return false;
++      if (!this.validWriteIdList.equals(that.validWriteIdList))
++        return false;
++    }
++
++    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
++    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
++    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
++      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
++        return false;
++      if (this.isStatsCompliant != that.isStatsCompliant)
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_statsDesc = true && (isSetStatsDesc());
+     list.add(present_statsDesc);
+     if (present_statsDesc)
+       list.add(statsDesc);
+ 
+     boolean present_statsObj = true && (isSetStatsObj());
+     list.add(present_statsObj);
+     if (present_statsObj)
+       list.add(statsObj);
+ 
++    boolean present_txnId = true && (isSetTxnId());
++    list.add(present_txnId);
++    if (present_txnId)
++      list.add(txnId);
++
++    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
++    list.add(present_validWriteIdList);
++    if (present_validWriteIdList)
++      list.add(validWriteIdList);
++
++    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
++    list.add(present_isStatsCompliant);
++    if (present_isStatsCompliant)
++      list.add(isStatsCompliant);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(ColumnStatistics other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetStatsDesc()).compareTo(other.isSetStatsDesc());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetStatsDesc()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.statsDesc, other.statsDesc);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetStatsObj()).compareTo(other.isSetStatsObj());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetStatsObj()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.statsObj, other.statsObj);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTxnId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetValidWriteIdList()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIsStatsCompliant()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("ColumnStatistics(");
+     boolean first = true;
+ 
+     sb.append("statsDesc:");
+     if (this.statsDesc == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.statsDesc);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("statsObj:");
+     if (this.statsObj == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.statsObj);
+     }
+     first = false;
++    if (isSetTxnId()) {
++      if (!first) sb.append(", ");
++      sb.append("txnId:");
++      sb.append(this.txnId);
++      first = false;
++    }
++    if (isSetValidWriteIdList()) {
++      if (!first) sb.append(", ");
++      sb.append("validWriteIdList:");
++      if (this.validWriteIdList == null) {
++        sb.append("null");
++      } else {
++        sb.append(this.validWriteIdList);
++      }
++      first = false;
++    }
++    if (isSetIsStatsCompliant()) {
++      if (!first) sb.append(", ");
++      sb.append("isStatsCompliant:");
++      sb.append(this.isStatsCompliant);
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetStatsDesc()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'statsDesc' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetStatsObj()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'statsObj' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+     if (statsDesc != null) {
+       statsDesc.validate();
+     }
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class ColumnStatisticsStandardSchemeFactory implements SchemeFactory {
+     public ColumnStatisticsStandardScheme getScheme() {
+       return new ColumnStatisticsStandardScheme();
+     }
+   }
+ 
+   private static class ColumnStatisticsStandardScheme extends StandardScheme<ColumnStatistics> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatistics struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // STATS_DESC
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.statsDesc = new ColumnStatisticsDesc();
+               struct.statsDesc.read(iprot);
+               struct.setStatsDescIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // STATS_OBJ
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list268 = iprot.readListBegin();
+                 struct.statsObj = new ArrayList<ColumnStatisticsObj>(_list268.size);
+                 ColumnStatisticsObj _elem269;
+                 for (int _i270 = 0; _i270 < _list268.size; ++_i270)
+                 {
+                   _elem269 = new ColumnStatisticsObj();
+                   _elem269.read(iprot);
+                   struct.statsObj.add(_elem269);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setStatsObjIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 3: // TXN_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.txnId = iprot.readI64();
++              struct.setTxnIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 4: // VALID_WRITE_ID_LIST
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.validWriteIdList = iprot.readString();
++              struct.setValidWriteIdListIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 5: // IS_STATS_COMPLIANT
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.isStatsCompliant = iprot.readBool();
++              struct.setIsStatsCompliantIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatistics struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.statsDesc != null) {
+         oprot.writeFieldBegin(STATS_DESC_FIELD_DESC);
+         struct.statsDesc.write(oprot);
+         oprot.writeFieldEnd();
+       }
+       if (struct.statsObj != null) {
+         oprot.writeFieldBegin(STATS_OBJ_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.statsObj.size()));
+           for (ColumnStatisticsObj _iter271 : struct.statsObj)
+           {
+             _iter271.write(oprot);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
++        oprot.writeI64(struct.txnId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.validWriteIdList != null) {
++        if (struct.isSetValidWriteIdList()) {
++          oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
++          oprot.writeString(struct.validWriteIdList);
++          oprot.writeFieldEnd();
++        }
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
++        oprot.writeBool(struct.isStatsCompliant);
++        oprot.writeFieldEnd();
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class ColumnStatisticsTupleSchemeFactory implements SchemeFactory {
+     public ColumnStatisticsTupleScheme getScheme() {
+       return new ColumnStatisticsTupleScheme();
+     }
+   }
+ 
+   private static class ColumnStatisticsTupleScheme extends TupleScheme<ColumnStatistics> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       struct.statsDesc.write(oprot);
+       {
+         oprot.writeI32(struct.statsObj.size());
+         for (ColumnStatisticsObj _iter272 : struct.statsObj)
+         {
+           _iter272.write(oprot);
+         }
+       }
++      BitSet optionals = new BitSet();
++      if (struct.isSetTxnId()) {
++        optionals.set(0);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        optionals.set(1);
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        optionals.set(2);
++      }
++      oprot.writeBitSet(optionals, 3);
++      if (struct.isSetTxnId()) {
++        oprot.writeI64(struct.txnId);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        oprot.writeString(struct.validWriteIdList);
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeBool(struct.isStatsCompliant);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       struct.statsDesc = new ColumnStatisticsDesc();
+       struct.statsDesc.read(iprot);
+       struct.setStatsDescIsSet(true);
+       {
+         org.apache.thrift.protocol.TList _list273 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.statsObj = new ArrayList<ColumnStatisticsObj>(_list273.size);
+         ColumnStatisticsObj _elem274;
+         for (int _i275 = 0; _i275 < _list273.size; ++_i275)
+         {
+           _elem274 = new ColumnStatisticsObj();
+           _elem274.read(iprot);
+           struct.statsObj.add(_elem274);
+         }
+       }
+       struct.setStatsObjIsSet(true);
++      BitSet incoming = iprot.readBitSet(3);
++      if (incoming.get(0)) {
++        struct.txnId = iprot.readI64();
++        struct.setTxnIdIsSet(true);
++      }
++      if (incoming.get(1)) {
++        struct.validWriteIdList = iprot.readString();
++        struct.setValidWriteIdListIsSet(true);
++      }
++      if (incoming.get(2)) {
++        struct.isStatsCompliant = iprot.readBool();
++        struct.setIsStatsCompliantIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
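
As a minimal sketch (not from the commit itself) of what the three new optional
ColumnStatistics fields do: txnId defaults to -1 with its isset bit clear, and
none of the three is serialized by either scheme above until its setter flips
the corresponding bit. The write-id list string below is only a placeholder
value, not a documented format.

    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;

    public class OptionalStatsFields {
      public static void main(String[] args) {
        ColumnStatistics stats = new ColumnStatistics();
        System.out.println(stats.getTxnId());      // -1 (default from the constructor)
        System.out.println(stats.isSetTxnId());    // false: the default never set the bit

        stats.setTxnId(42L);                       // setter also sets the isset bit
        stats.setValidWriteIdList("placeholder");  // plain String field; null means unset
        stats.setIsStatsCompliant(true);

        System.out.println(stats.isSetTxnId());               // true
        System.out.println(stats.isSetValidWriteIdList());    // true
        System.out.println(stats.isSetIsStatsCompliant());    // true
        // Once statsDesc/statsObj (the required fields) are populated, both the
        // standard and tuple schemes above will include these three fields.
      }
    }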


[63/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/resources/package.jdo
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/resources/package.jdo
index 0000000,5fb548c..70150da
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/resources/package.jdo
+++ b/standalone-metastore/metastore-common/src/main/resources/package.jdo
@@@ -1,0 -1,1420 +1,1426 @@@
+ <?xml version="1.0"?>
+ <!--
+   Licensed to the Apache Software Foundation (ASF) under one
+   or more contributor license agreements.  See the NOTICE file
+   distributed with this work for additional information
+   regarding copyright ownership.  The ASF licenses this file
+   to you under the Apache License, Version 2.0 (the
+   "License"); you may not use this file except in compliance
+   with the License.  You may obtain a copy of the License at
+  
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+ --> 
+ <!DOCTYPE jdo PUBLIC "-//Sun Microsystems, Inc.//DTD Java Data Objects Metadata 2.0//EN"
+   "http://java.sun.com/dtd/jdo_2_0.dtd">
+ <!--
+   Size Limitations:
+ 
+   Indexed VARCHAR: 767 bytes (MySQL running on InnoDB Engine http://bugs.mysql.com/bug.php?id=13315)
+   Non-indexed VARCHAR: 4000 bytes (max length on Oracle 9i/10g/11g)
+ 
+ -->
+ <jdo>
+   <package name="org.apache.hadoop.hive.metastore.model">
+     <class name="MDatabase" identity-type="datastore" table="DBS" detachable="true">  
+       <datastore-identity>
+         <column name="DB_ID"/>
+       </datastore-identity>
+       <index name="UniqueDatabase" unique="true">
+         <column name="NAME"/>
+         <column name="CTLG_NAME"/>
+       </index>
+       <field name="name">  
+         <column name="NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="catalogName">
+         <column name="CTLG_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="description">
+         <column name="DESC" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="locationUri">
+         <column name="DB_LOCATION_URI" length="4000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="parameters" table="DATABASE_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="DB_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="180" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="ownerName">    
+         <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+        <field name="ownerType">
+         <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MCatalog" identity-type="datastore" table="CTLGS" detachable="true">
+       <datastore-identity>
+         <column name="CTLG_ID"/>
+       </datastore-identity>
+       <field name="name">
+         <column name="NAME" length="256" jdbc-type="VARCHAR"/>
+         <index name="UniqueCatalog" unique="true"/>
+       </field>
+       <field name="description">
+         <column name="DESC" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="locationUri">
+         <column name="LOCATION_URI" length="4000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MFieldSchema" embedded-only="true" table="TYPE_FIELDS" detachable="true">
+       <field name="name">
+         <column name="FNAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="type" >
+         <column name="FTYPE" length="32672" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="comment" >
+         <column name="FCOMMENT" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MType" table="TYPES" detachable="true">  
+       <field name="name" >  
+         <column name="TYPE_NAME" length="128" jdbc-type="VARCHAR"/>  
+         <index name="UniqueType" unique="true"/>
+       </field>
+       <field name="type1">  
+         <column name="TYPE1" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="type2">  
+         <column name="TYPE2" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="fields" table="TYPE_FIELDS" >
+         <collection element-type="MFieldSchema"/>
+         <join>
+           <primary-key name="TYPE_FIELDS_PK">
+             <column name="TYPE_NAME"/>
+             <column name="FIELD_NAME"/>
+           </primary-key>
+           <column name="TYPE_NAME"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="name">
+               <column name="FIELD_NAME" length="128" jdbc-type="VARCHAR"/>
+             </field>
+             <field name="type">
+               <column name="FIELD_TYPE" length="767" jdbc-type="VARCHAR"  allows-null="false"/>
+             </field>
+             <field name="comment" >
+               <column name="COMMENT" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+     </class>
+ 
+     <class name="MTable" table="TBLS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="TBL_ID"/>
+       </datastore-identity>
+       <index name="UniqueTable" unique="true">
+         <column name="TBL_NAME"/>
+         <column name="DB_ID"/>
+       </index>
+       <field name="tableName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="database">
+         <column name="DB_ID"/>
+       </field>
+       <field name="partitionKeys" table="PARTITION_KEYS" >
+         <collection element-type="MFieldSchema"/>
+         <join>
+           <primary-key name="PARTITION_KEY_PK">
+             <column name="TBL_ID"/>
+             <column name="PKEY_NAME"/>
+           </primary-key>
+           <column name="TBL_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="name">
+               <column name="PKEY_NAME" length="128" jdbc-type="VARCHAR"/>
+               </field>
+             <field name="type">
+               <column name="PKEY_TYPE" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+             </field>
+             <field name="comment" >
+               <column name="PKEY_COMMENT" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+       <field name="sd" dependent="true">
+         <column name="SD_ID"/>
+       </field>
+       <field name="owner">
+         <column name="OWNER" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="ownerType">
+         <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="lastAccessTime">
+         <column name="LAST_ACCESS_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="retention">
+         <column name="RETENTION" jdbc-type="integer"/>
+       </field>
+       <field name="parameters" table="TABLE_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="TBL_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="viewOriginalText" default-fetch-group="false">
+         <column name="VIEW_ORIGINAL_TEXT" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="viewExpandedText" default-fetch-group="false">
+         <column name="VIEW_EXPANDED_TEXT" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="rewriteEnabled">
+         <column name="IS_REWRITE_ENABLED"/>
+       </field>
+       <field name="tableType">
+         <column name="TBL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
++      <field name="writeId">
++        <column name="WRITE_ID"/>
++      </field>
+     </class>
+ 
+     <class name="MCreationMetadata" identity-type="datastore" table="MV_CREATION_METADATA" detachable="true">
+       <datastore-identity>
+         <column name="MV_CREATION_METADATA_ID"/>
+       </datastore-identity>
+       <field name="catalogName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="tblName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="tables" table="MV_TABLES_USED">
+         <collection element-type="MTable"/>
+         <join>
+           <column name="MV_CREATION_METADATA_ID"/>
+         </join>
+         <element column="TBL_ID"/>
+       </field>
+       <field name="txnList">
+         <column name="TXN_LIST" jdbc-type="CLOB" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MConstraint" identity-type="application" table="KEY_CONSTRAINTS" detachable="true" objectid-class="MConstraint$PK">
+       <field name="constraintName" primary-key="true">
+         <column name="CONSTRAINT_NAME"/>
+       </field>
+       <field name="position" primary-key="true">
+         <column name="POSITION"/>
+       </field>
+       <field name="childColumn">
+         <column name="CHILD_CD_ID"/>
+       </field>
+       <field name="childIntegerIndex">
+         <column name="CHILD_INTEGER_IDX"/>
+       </field>
+       <field name="childTable">
+         <column name="CHILD_TBL_ID"/>
+       </field>
+       <field name="parentColumn">
+         <column name="PARENT_CD_ID"/>
+       </field>
+       <field name="parentIntegerIndex">
+     <column name="PARENT_INTEGER_IDX"/>
+       </field>
+       <field name="parentTable">
+         <column name="PARENT_TBL_ID"/>
+       </field>
+       <field name="constraintType">
+         <column name="CONSTRAINT_TYPE"/>
+       </field>
+       <field name="deleteRule">
+         <column name="DELETE_RULE"/>
+       </field>
+       <field name="updateRule">
+         <column name="UPDATE_RULE"/>
+       </field>
+       <field name="enableValidateRely">
+         <column name="ENABLE_VALIDATE_RELY"/>
+       </field>
+     </class>
+ 
+     <class name="MSerDeInfo" identity-type="datastore" table="SERDES" detachable="true">
+       <datastore-identity>
+         <column name="SERDE_ID"/>
+       </datastore-identity>
+       <field name="name">
+         <column name="NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="serializationLib">
+         <column name="SLIB" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="parameters" table="SERDE_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="SERDE_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="serializerClass">
+         <column name="SERIALIZER_CLASS" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="deserializerClass">
+         <column name="DESERIALIZER_CLASS" length="4000" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="serdeType">
+         <column name="SERDE_TYPE" jdbc-type="integer" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MOrder" embedded-only="true" table="SORT_ORDER" detachable="true">
+       <field name="col">
+         <column name="COL_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="order">
+         <column name="ORDER" jdbc-type="INTEGER"  allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MColumnDescriptor" identity-type="datastore" table="CDS" detachable="true">
+       <datastore-identity>
+         <column name="CD_ID"/>
+       </datastore-identity>
+       <field name="cols" table="COLUMNS_V2" >
+         <collection element-type="MFieldSchema"/>
+         <join>
+           <primary-key name="COLUMNS_PK">
+             <column name="COLUMN_NAME"/>
+           </primary-key>
+           <column name="CD_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="name">
+               <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+               </field>
+             <field name="type">
+               <column name="TYPE_NAME" length="32672" jdbc-type="VARCHAR" allows-null="false"/>
+             </field>
+             <field name="comment">
+               <column name="COMMENT" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+     </class>
+ 
+    <class name="MStringList" identity-type="datastore" table="Skewed_STRING_LIST" detachable="true">
+      <datastore-identity>
+        <column name="STRING_LIST_ID"/>
+      </datastore-identity>
+      <field name="internalList" table="Skewed_STRING_LIST_VALUES">
+        <collection element-type="java.lang.String"/>
+        <join>
+          <column name="STRING_LIST_ID"/>
+        </join>
+        <element column="STRING_LIST_VALUE"/>
+      </field>
+    </class>
+ 
+     <class name="MStorageDescriptor" identity-type="datastore" table="SDS" detachable="true">
+       <datastore-identity>
+         <column name="SD_ID"/>
+       </datastore-identity>
+       <field name="cd">
+           <column name="CD_ID"/>
+       </field>
+       <field name="location">
+         <column name="LOCATION" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="inputFormat">
+         <column name="INPUT_FORMAT" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="outputFormat">
+         <column name="OUTPUT_FORMAT" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="isCompressed">
+         <column name="IS_COMPRESSED"/>
+       </field>
+       <field name="isStoredAsSubDirectories">
+         <column name="IS_STOREDASSUBDIRECTORIES"/>
+       </field>
+       <field name="numBuckets">
+         <column name="NUM_BUCKETS" jdbc-type="integer"/>
+       </field>
+       <field name="serDeInfo" dependent="true">
+         <column name="SERDE_ID"/>
+       </field>
+       <field name="bucketCols" table="BUCKETING_COLS">
+         <collection element-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <element column="BUCKET_COL_NAME"/>
+       </field>
+       <field name="sortCols" table="SORT_COLS">
+         <collection element-type="MOrder"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="col">
+               <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+               </field>
+             <field name="order">
+               <column name="ORDER" jdbc-type="INTEGER"  allows-null="false"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+       <field name="parameters" table="SD_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="32672" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+       <field name="skewedColNames" table="SKEWED_COL_NAMES">
+         <collection element-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <element column="SKEWED_COL_NAME"/>
+       </field>
+       <field name="skewedColValues" table="SKEWED_VALUES">
+         <collection element-type="MStringList"/>
+         <join/>
+       </field>
+       <field name="skewedColValueLocationMaps" table="SKEWED_COL_VALUE_LOC_MAP">
+         <map key-type="MStringList" value-type="java.lang.String"/>
+         <join>
+           <column name="SD_ID"/>
+         </join>
+         <value>
+           <column name="location" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+     </class>
+ 
+     <class name="MPartition" table="PARTITIONS" identity-type="datastore" detachable="true">
+       <index name="UniquePartition" unique="true">
+         <column name="PART_NAME"/>
+         <column name="TBL_ID"/>
+       </index>
+       <datastore-identity>
+         <column name="PART_ID"/>
+       </datastore-identity>
+       <field name="partitionName">
+         <column name="PART_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="lastAccessTime">
+         <column name="LAST_ACCESS_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="values" table="PARTITION_KEY_VALS">
+         <collection element-type="java.lang.String"/>
+         <join>
+           <column name="PART_ID"/>
+         </join>
+         <element column="PART_KEY_VAL"/>
+       </field>
+       <field name="sd" dependent="true">
+         <column name="SD_ID"/>
+       </field>
+       <field name="parameters" table="PARTITION_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="PART_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
++      <field name="writeId">
++        <column name="WRITE_ID"/>
++      </field>
+     </class>
+     
+     <class name="MIndex" table="IDXS" identity-type="datastore" detachable="true">
+       <index name="UniqueINDEX" unique="true">
+         <column name="INDEX_NAME"/>
+         <column name="ORIG_TBL_ID"/>
+       </index>
+       
+       <datastore-identity>
+         <column name="INDEX_ID"/>
+       </datastore-identity>
+       <field name="indexName">
+         <column name="INDEX_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="origTable">
+         <column name="ORIG_TBL_ID"/>
+       </field>
+       <field name="indexTable">
+         <column name="INDEX_TBL_ID"/>
+       </field>
+       <field name="indexHandlerClass">
+         <column name="INDEX_HANDLER_CLASS" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="deferredRebuild">
+         <column name="DEFERRED_REBUILD"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="lastAccessTime">
+         <column name="LAST_ACCESS_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="sd" dependent="true">
+         <column name="SD_ID"/>
+       </field>
+       <field name="parameters" table="INDEX_PARAMS">
+         <map key-type="java.lang.String" value-type="java.lang.String"/>
+         <join>
+           <column name="INDEX_ID"/>
+         </join>
+         <key>
+            <column name="PARAM_KEY" length="256" jdbc-type="VARCHAR"/>
+         </key>
+         <value>
+            <column name="PARAM_VALUE" length="4000" jdbc-type="VARCHAR"/>
+         </value>
+       </field>
+     </class>
+ 
+     <class name="MRole" table="ROLES" identity-type="datastore" detachable="true">
+ 
+       <index name="RoleEntityINDEX" unique="true">
+         <column name="ROLE_NAME"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="ROLE_ID"/>
+       </datastore-identity>
+ 
+       <field name="roleName">
+         <column name="ROLE_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="ownerName">
+         <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       
+     </class>
+ 
+     <class name="MRoleMap" table="ROLE_MAP" identity-type="datastore" detachable="true">
+       <index name="UserRoleMapINDEX" unique="true">
+         <column name="PRINCIPAL_NAME"/>
+         <column name="ROLE_ID"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="ROLE_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="role">
+         <column name="ROLE_ID" />
+       </field>
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="addTime">
+         <column name="ADD_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+     </class>
+ 
+     <class name="MGlobalPrivilege" table="GLOBAL_PRIVS" identity-type="datastore" detachable="true">
+       
+       <index name="GlobalPrivilegeIndex" unique="true">
+         <column name="AUTHORIZER"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="USER_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+     
+       <datastore-identity>
+         <column name="USER_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="privilege">
+         <column name="USER_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MDBPrivilege" table="DB_PRIVS" identity-type="datastore" detachable="true">
+       
+       <index name="DBPrivilegeIndex" unique="true">
+         <column name="AUTHORIZER"/>
+         <column name="DB_ID"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="DB_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="DB_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="database">
+         <column name="DB_ID" />
+       </field>
+       <field name="privilege">
+         <column name="DB_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MTablePrivilege" table="TBL_PRIVS" identity-type="datastore" detachable="true">
+     
+       <index name="TablePrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="TBL_ID"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="TBL_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="TBL_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID"/>
+       </field>
+       <field name="privilege">
+         <column name="TBL_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionPrivilege" table="PART_PRIVS" identity-type="datastore" detachable="true">
+     
+       <index name="PartPrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="PART_ID"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="PART_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="PART_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="partition">
+         <column name="PART_ID" />
+       </field>
+       <field name="privilege">
+         <column name="PART_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MTableColumnPrivilege" table="TBL_COL_PRIVS" identity-type="datastore" detachable="true">
+     
+       <index name="TableColumnPrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="TBL_ID"/>
+         <column name="COLUMN_NAME"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="TBL_COL_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="TBL_COLUMN_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID" />
+       </field>
+       <field name="columnName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="privilege">
+         <column name="TBL_COL_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionColumnPrivilege" table="PART_COL_PRIVS" identity-type="datastore" detachable="true">
+     
+       <index name="PartitionColumnPrivilegeIndex" unique="false">
+         <column name="AUTHORIZER"/>
+         <column name="PART_ID"/>
+         <column name="COLUMN_NAME"/>
+         <column name="PRINCIPAL_NAME"/>
+         <column name="PRINCIPAL_TYPE"/>
+         <column name="PART_COL_PRIV"/>
+         <column name="GRANTOR"/>
+         <column name="GRANTOR_TYPE"/>
+       </index>
+ 
+       <datastore-identity>
+         <column name="PART_COLUMN_GRANT_ID"/>
+       </datastore-identity>
+ 
+       <field name="principalName">
+         <column name="PRINCIPAL_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="principalType">
+         <column name="PRINCIPAL_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="partition">
+         <column name="PART_ID" />
+       </field>
+       <field name="columnName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="privilege">
+         <column name="PART_COL_PRIV" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="grantor">
+         <column name="GRANTOR" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantorType">
+         <column name="GRANTOR_TYPE" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="grantOption">
+         <column name="GRANT_OPTION" jdbc-type="SMALLINT"/>
+       </field>
+       <field name="authorizer">
+         <column name="AUTHORIZER" length="128" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionEvent" table="PARTITION_EVENTS" identity-type="datastore" detachable="true">
+
+       <index name="PartitionEventIndex" unique="false">
+         <column name="PARTITION_NAME"/>
+       </index>
+
+       <datastore-identity>
+         <column name="PART_NAME_ID"/>
+       </datastore-identity>
+
+       <field name="catalogName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="tblName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="partName">
+         <column name="PARTITION_NAME" length="767" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="eventType">
+         <column name="EVENT_TYPE" jdbc-type="integer"/>
+       </field>
+       <field name="eventTime">
+         <column name="EVENT_TIME" jdbc-type="BIGINT"/>
+       </field>
+
+     </class>
+     
+     <class name="MMasterKey" table="MASTER_KEYS" identity-type="application" detachable="true">
+ 
+       <field name="keyId" primary-key="true" value-strategy="native">
+         <column name="KEY_ID" jdbc-type="integer" />
+       </field>
+         
+       <field name="masterKey">
+         <column name="MASTER_KEY" length="767" jdbc-type="VARCHAR" />
+       </field>  
+       
+     </class>
+ 
+     <class name="MDelegationToken" table="DELEGATION_TOKENS" identity-type="application" detachable="true">
+ 
+       <field name="tokenIdentifier" primary-key="true">
+         <column name="TOKEN_IDENT" length="767" jdbc-type="VARCHAR" />
+       </field>  
+ 
+       <field name="tokenStr">
+         <column name="TOKEN" length="767" jdbc-type="VARCHAR" />
+       </field>
+             
+     </class>    
+ 
+     <class name="MTableColumnStatistics" table="TAB_COL_STATS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="CS_ID"/>
+       </datastore-identity>
+ 
+       <field name="catName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="tableName">
+         <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="table">
+         <column name="TBL_ID"/>
+       </field>
+       <field name="colName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="colType">
+         <column name="COLUMN_TYPE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="longLowValue">
+         <column name="LONG_LOW_VALUE" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="longHighValue">
+         <column name="LONG_HIGH_VALUE" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="doubleLowValue">
+         <column name="DOUBLE_LOW_VALUE" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="doubleHighValue">
+         <column name="DOUBLE_HIGH_VALUE" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="decimalLowValue">
+         <column name="BIG_DECIMAL_LOW_VALUE" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="decimalHighValue">
+         <column name="BIG_DECIMAL_HIGH_VALUE" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="numNulls">
+         <column name="NUM_NULLS" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="numDVs">
+         <column name="NUM_DISTINCTS" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="bitVector">
+         <column name="BIT_VECTOR" jdbc-type="BLOB" allows-null="true"/>
+       </field>
+       <field name="avgColLen">
+         <column name="AVG_COL_LEN" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="maxColLen">
+         <column name="MAX_COL_LEN" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numTrues">
+         <column name="NUM_TRUES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numFalses">
+         <column name="NUM_FALSES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="lastAnalyzed">
+         <column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MPartitionColumnStatistics" table="PART_COL_STATS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="CS_ID"/>
+       </datastore-identity>
+ 
+       <field name="catName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="tableName">
+         <column name="TABLE_NAME" length="256" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="partitionName">
+         <column name="PARTITION_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="partition">
+         <column name="PART_ID"/>
+       </field>
+       <field name="colName">
+         <column name="COLUMN_NAME" length="767" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="colType">
+         <column name="COLUMN_TYPE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="longLowValue">
+         <column name="LONG_LOW_VALUE" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="longHighValue">
+         <column name="LONG_HIGH_VALUE" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="doubleLowValue">
+         <column name="DOUBLE_LOW_VALUE" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="doubleHighValue">
+         <column name="DOUBLE_HIGH_VALUE" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="decimalLowValue">
+         <column name="BIG_DECIMAL_LOW_VALUE" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="decimalHighValue">
+         <column name="BIG_DECIMAL_HIGH_VALUE" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="numNulls">
+         <column name="NUM_NULLS" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="numDVs">
+         <column name="NUM_DISTINCTS" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="bitVector">
+         <column name="BIT_VECTOR" jdbc-type="BLOB" allows-null="true"/>
+       </field>
+       <field name="avgColLen">
+         <column name="AVG_COL_LEN" jdbc-type="DOUBLE" allows-null="true"/>
+       </field>
+       <field name="maxColLen">
+         <column name="MAX_COL_LEN" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numTrues">
+         <column name="NUM_TRUES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="numFalses">
+         <column name="NUM_FALSES" jdbc-type="BIGINT" allows-null="true"/>
+       </field>
+       <field name="lastAnalyzed">
+         <column name="LAST_ANALYZED" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+     </class>
+     <class name="MVersionTable" table="VERSION" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="VER_ID"/>
+       </datastore-identity>
+       <field name="schemaVersion">
+         <column name="SCHEMA_VERSION" length="127" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="versionComment">
+         <column name="VERSION_COMMENT" length="255" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MMetastoreDBProperties" table="METASTORE_DB_PROPERTIES" identity-type="application" detachable="true">
+       <field name="propertyKey" primary-key="true">
+         <column name="PROPERTY_KEY" length="255" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="propertyValue">
+         <column name="PROPERTY_VALUE" length="1000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" length="1000" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MResourceUri" embedded-only="true" table="RESOURCE_URI" detachable="true">
+       <field name="resourceType">
+         <column name="RESOURCE_TYPE" jdbc-type="INTEGER"/>
+       </field>
+       <field name="uri">
+         <column name="RESOURCE_URI" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+     </class>
+ 
+     <class name="MFunction" table="FUNCS" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="FUNC_ID"/>
+       </datastore-identity>
+       <index name="UniqueFunction" unique="true">
+         <column name="FUNC_NAME"/>
+         <column name="DB_ID"/>
+       </index>
+       <field name="functionName">
+         <column name="FUNC_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="database">
+         <column name="DB_ID"/>
+       </field>
+       <field name="functionType">
+         <column name="FUNC_TYPE" jdbc-type="integer"/>
+       </field>
+       <field name="className">
+         <column name="CLASS_NAME" length="4000" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="ownerName">
+         <column name="OWNER_NAME" length="128" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="ownerType">
+         <column name="OWNER_TYPE" length="10" jdbc-type="VARCHAR"/>
+       </field>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="resourceUris" table="FUNC_RU">
+         <collection element-type="MResourceUri"/>
+         <join>
+           <column name="FUNC_ID"/>
+         </join>
+         <element>
+           <embedded>
+             <field name="resourceType">
+               <column name="RESOURCE_TYPE" jdbc-type="INTEGER"/>
+             </field>
+             <field name="uri">
+               <column name="RESOURCE_URI" length="4000" jdbc-type="VARCHAR"/>
+             </field>
+           </embedded>
+         </element>
+       </field>
+     </class>
+ 
+     <class name="MNotificationLog" table="NOTIFICATION_LOG" identity-type="datastore" detachable="true">
+       <datastore-identity strategy="increment" key-cache-size="1">
+         <column name="NL_ID"/>
+       </datastore-identity>
+       <field name="eventId">
+         <column name="EVENT_ID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="eventTime">
+         <column name="EVENT_TIME" jdbc-type="INTEGER" allows-null="false"/>
+       </field>
+       <field name="eventType">
+         <column name="EVENT_TYPE" length="32" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="catalogName">
+         <column name="CAT_NAME" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="dbName">
+         <column name="DB_NAME" length="128" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="tableName">
+         <column name="TBL_NAME" length="256" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+       <field name="message">
+         <column name="MESSAGE" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="messageFormat">
+         <column name="MESSAGE_FORMAT" length="16" jdbc-type="VARCHAR" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <!-- I tried to use a sequence here, but Derby didn't handle it well. -->
+     <class name="MNotificationNextId" table="NOTIFICATION_SEQUENCE" identity-type="datastore" detachable="true">
+       <datastore-identity>
+         <column name="NNI_ID"/>
+       </datastore-identity>
+       <field name="nextEventId">
+         <column name="NEXT_EVENT_ID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+     </class>
+ 
+     <class name="MTxnWriteNotificationLog" table="TXN_WRITE_NOTIFICATION_LOG" identity-type="datastore" detachable="true">
+       <datastore-identity strategy="increment" key-cache-size="1">
+         <column name="WNL_ID"/>
+       </datastore-identity>
+       <field name="txnId">
+         <column name="WNL_TXNID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="writeId">
+         <column name="WNL_WRITEID" jdbc-type="BIGINT" allows-null="false"/>
+       </field>
+       <field name="database">
+         <column name="WNL_DATABASE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="table">
+         <column name="WNL_TABLE" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="partition">
+         <column name="WNL_PARTITION" length="1024" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="tableObject">
+         <column name="WNL_TABLE_OBJ" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="partObject">
+         <column name="WNL_PARTITION_OBJ" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="files">
+         <column name="WNL_FILES" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="eventTime">
+         <column name="WNL_EVENT_TIME" jdbc-type="INTEGER" allows-null="false"/>
+       </field>
+     </class>
+ 
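+     <!-- Workload management (WM) mappings: a resource plan owns its pools,
+          triggers, and mappings through the mapped-by collections below. -->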
+     <class name="MWMResourcePlan" identity-type="datastore" table="WM_RESOURCEPLAN" detachable="true">
+       <datastore-identity>
+         <column name="RP_ID"/>
+       </datastore-identity>
+       <field name="name">
+         <column name="NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="queryParallelism">
+         <column name="QUERY_PARALLELISM" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <field name="status">
+         <column name="STATUS" jdbc-type="string" allows-null="false"/>
+       </field>
+       <field name="defaultPool">
+         <column name="DEFAULT_POOL_ID" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <index name="UniqueResourcePlan" unique="true">
+         <column name="NAME"/>
+       </index>
+ 
+       <field name="pools" mapped-by="resourcePlan">
+         <collection element-type="MWMPool"/>
+       </field>
+       <field name="triggers" mapped-by="resourcePlan">
+         <collection element-type="MWMTrigger"/>
+       </field>
+       <field name="mappings" mapped-by="resourcePlan">
+         <collection element-type="MWMMapping"/>
+       </field>
+     </class>
+ 
+     <class name="MWMPool" identity-type="datastore" table="WM_POOL" detachable="true">
+       <datastore-identity>
+         <column name="POOL_ID"/>
+       </datastore-identity>
+       <field name="resourcePlan">
+         <column name="RP_ID" jdbc-type="integer" allows-null="false"/>
+       </field>
+       <field name="path">
+         <column name="PATH" length="1024" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="allocFraction">
+         <column name="ALLOC_FRACTION" jdbc-type="double" allows-null="true"/>
+       </field>
+       <field name="queryParallelism">
+         <column name="QUERY_PARALLELISM" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <field name="schedulingPolicy">
+         <column name="SCHEDULING_POLICY" jdbc-type="string" allows-null="true"/>
+       </field>
+       <field name="triggers" table="WM_POOL_TO_TRIGGER">
+         <collection element-type="MWMTrigger" />
+         <join>
+           <column name="POOL_ID" />
+         </join>
+         <element>
+           <column name="TRIGGER_ID"/>
+         </element>
+       </field>
+       <index name="UniqueWMPool" unique="true">
+         <column name="RP_ID"/>
+         <column name="PATH"/>
+       </index>
+     </class>
+ 
+     <class name="MWMTrigger" identity-type="datastore" table="WM_TRIGGER" detachable="true">
+       <datastore-identity>
+         <column name="TRIGGER_ID"/>
+       </datastore-identity>
+       <field name="resourcePlan">
+         <column name="RP_ID" jdbc-type="integer" allows-null="false"/>
+       </field>
+       <field name="name">
+         <column name="NAME" length="128" jdbc-type="VARCHAR" allows-null="false"/>
+       </field>
+       <field name="triggerExpression">
+         <column name="TRIGGER_EXPRESSION" jdbc-type="string" allows-null="true"/>
+       </field>
+       <field name="actionExpression">
+         <column name="ACTION_EXPRESSION" jdbc-type="string" allows-null="true"/>
+       </field>
+       <field name="isInUnmanaged">
+         <column name="IS_IN_UNMANAGED" allows-null="false"/>
+       </field>
+       <field name="pools" mapped-by="triggers">
+         <collection element-type="MWMPool" />
+       </field>
+       <index name="UniqueWMTrigger" unique="true">
+         <column name="RP_ID"/>
+         <column name="NAME"/>
+       </index>
+     </class>
+ 
+     <class name="MWMMapping" identity-type="datastore" table="WM_MAPPING" detachable="true">
+       <datastore-identity>
+         <column name="MAPPING_ID"/>
+       </datastore-identity>
+       <field name="resourcePlan">
+         <column name="RP_ID" jdbc-type="integer" allows-null="false"/>
+       </field>
+       <field name="entityType">
+         <column name="ENTITY_TYPE" jdbc-type="string" length="128" />
+       </field>
+       <field name="entityName">
+         <column name="ENTITY_NAME" jdbc-type="string" length="128" />
+       </field>
+       <field name="pool">
+         <column name="POOL_ID" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <field name="ordering">
+         <column name="ORDERING" jdbc-type="integer" allows-null="true"/>
+       </field>
+       <index name="UniqueWMMapping" unique="true">
+         <column name="RP_ID"/>
+         <column name="ENTITY_TYPE"/>
+         <column name="ENTITY_NAME"/>
+       </index>
+     </class>
+ 
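+     <!-- Schema registry mappings: I_SCHEMA holds one row per schema and
+          SCHEMA_VERSION one row per version of it. -->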
+     <class name="MISchema" identity-type="datastore" table="I_SCHEMA" detachable="true">
+       <datastore-identity>
+         <column name="SCHEMA_ID"/>
+       </datastore-identity>
+       <field name="schemaType">
+         <column name="SCHEMA_TYPE" jdbc-type="integer"/>
+       </field>
+       <field name="name">
+         <column name="NAME" jdbc-type="varchar" length="256"/>
+       </field>
+       <field name="db">
+         <column name="DB_ID"/>
+       </field>
+       <field name="compatibility">
+         <column name="COMPATIBILITY" jdbc-type="integer"/>
+       </field>
+       <field name="validationLevel">
+         <column name="VALIDATION_LEVEL" jdbc-type="integer"/>
+       </field>
+       <field name="canEvolve">
+         <column name="CAN_EVOLVE"/>
+       </field>
+       <field name="schemaGroup">
+         <column name="SCHEMA_GROUP" jdbc-type="varchar" length="256" allows-null="true"/>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" jdbc-type="varchar" length="4000" allows-null="true"/>
+       </field>
+     </class>
+ 
+     <class name="MSchemaVersion" identity-type="datastore" table="SCHEMA_VERSION" detachable="true">
+       <datastore-identity>
+         <column name="SCHEMA_VERSION_ID"/>
+       </datastore-identity>
+       <field name="iSchema">
+         <column name="SCHEMA_ID"/>
+       </field>
+       <field name="version">
+         <column name="VERSION" jdbc-type="integer"/>
+       </field>
+       <field name="createdAt">
+         <column name="CREATED_AT" jdbc-type="bigint"/>
+       </field>
+       <field name="cols">
+           <column name="CD_ID"/>
+       </field>
+       <field name="state">
+         <column name="STATE" jdbc-type="integer"/>
+       </field>
+       <field name="description">
+         <column name="DESCRIPTION" jdbc-type="varchar" length="4000" allows-null="true"/>
+       </field>
+       <field name="schemaText" default-fetch-group="false">
+         <column name="SCHEMA_TEXT" jdbc-type="LONGVARCHAR"/>
+       </field>
+       <field name="fingerprint">
+         <column name="FINGERPRINT" jdbc-type="varchar" length="256" allows-null="true"/>
+       </field>
+       <field name="name">
+         <column name="SCHEMA_VERSION_NAME" jdbc-type="varchar" length="256" allows-null="true"/>
+       </field>
+       <field name="serDe">
+         <column name="SERDE_ID"/>
+       </field>
+     </class>
+     <class name="MRuntimeStat" identity-type="datastore" table="RUNTIME_STATS" detachable="true">
+       <datastore-identity>
+         <column name="RS_ID"/>
+       </datastore-identity>
+       <field name="createTime">
+         <column name="CREATE_TIME" jdbc-type="integer"/>
+       </field>
+       <field name="weight">
+         <column name="WEIGHT" jdbc-type="integer"/>
+       </field>
+       <field name="payload">
+         <column name="PAYLOAD" jdbc-type="BLOB" allows-null="true"/>
+       </field>
+    </class>
+   </package>
+ </jdo>
+ 
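
For orientation, a rough sketch (illustrative only, not part of this change) of how code drives one of the JDO-mapped classes above through plain javax.jdo. It assumes a configured PersistenceManagerFactory, model classes living in the org.apache.hadoop.hive.metastore.model package, and the usual getNextEventId()/setNextEventId(long) accessors on MNotificationNextId; DataNucleus resolves the class to the NOTIFICATION_SEQUENCE table via the mapping above.

import javax.jdo.PersistenceManager;
import javax.jdo.PersistenceManagerFactory;
import javax.jdo.Query;
import javax.jdo.Transaction;

import org.apache.hadoop.hive.metastore.model.MNotificationNextId;

public class NotificationIdSketch {
  /**
   * Returns the current event id and advances the single NOTIFICATION_SEQUENCE
   * row by one, mirroring the "increment instead of a DB sequence" choice
   * noted in the mapping above.
   */
  public static long nextEventId(PersistenceManagerFactory pmf) {
    PersistenceManager pm = pmf.getPersistenceManager();
    Transaction tx = pm.currentTransaction();
    try {
      tx.begin();
      // JDOQL over the mapped class; the <class name="MNotificationNextId"
      // table="NOTIFICATION_SEQUENCE"> element tells DataNucleus which table to read.
      Query query = pm.newQuery(MNotificationNextId.class);
      query.setUnique(true);
      MNotificationNextId row = (MNotificationNextId) query.execute();
      long id = row.getNextEventId();
      row.setNextEventId(id + 1);  // change is flushed to the table at commit
      tx.commit();
      return id;
    } finally {
      if (tx.isActive()) {
        tx.rollback();  // commit failed or was never reached
      }
      pm.close();
    }
  }
}

The increment-in-place pattern matches the comment in the mapping: a single row is read and bumped inside one transaction instead of relying on a database sequence.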


[28/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java
new file mode 100644
index 0000000..2f2fcfa
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysRequest.java
@@ -0,0 +1,814 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ForeignKeysRequest implements org.apache.thrift.TBase<ForeignKeysRequest, ForeignKeysRequest._Fields>, java.io.Serializable, Cloneable, Comparable<ForeignKeysRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ForeignKeysRequest");
+
+  private static final org.apache.thrift.protocol.TField PARENT_DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parent_db_name", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField PARENT_TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parent_tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField FOREIGN_DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreign_db_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField FOREIGN_TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreign_tbl_name", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ForeignKeysRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ForeignKeysRequestTupleSchemeFactory());
+  }
+
+  private String parent_db_name; // required
+  private String parent_tbl_name; // required
+  private String foreign_db_name; // required
+  private String foreign_tbl_name; // required
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PARENT_DB_NAME((short)1, "parent_db_name"),
+    PARENT_TBL_NAME((short)2, "parent_tbl_name"),
+    FOREIGN_DB_NAME((short)3, "foreign_db_name"),
+    FOREIGN_TBL_NAME((short)4, "foreign_tbl_name"),
+    CAT_NAME((short)5, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PARENT_DB_NAME
+          return PARENT_DB_NAME;
+        case 2: // PARENT_TBL_NAME
+          return PARENT_TBL_NAME;
+        case 3: // FOREIGN_DB_NAME
+          return FOREIGN_DB_NAME;
+        case 4: // FOREIGN_TBL_NAME
+          return FOREIGN_TBL_NAME;
+        case 5: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PARENT_DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("parent_db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARENT_TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("parent_tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.FOREIGN_DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreign_db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.FOREIGN_TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreign_tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ForeignKeysRequest.class, metaDataMap);
+  }
+
+  public ForeignKeysRequest() {
+  }
+
+  public ForeignKeysRequest(
+    String parent_db_name,
+    String parent_tbl_name,
+    String foreign_db_name,
+    String foreign_tbl_name)
+  {
+    this();
+    this.parent_db_name = parent_db_name;
+    this.parent_tbl_name = parent_tbl_name;
+    this.foreign_db_name = foreign_db_name;
+    this.foreign_tbl_name = foreign_tbl_name;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ForeignKeysRequest(ForeignKeysRequest other) {
+    if (other.isSetParent_db_name()) {
+      this.parent_db_name = other.parent_db_name;
+    }
+    if (other.isSetParent_tbl_name()) {
+      this.parent_tbl_name = other.parent_tbl_name;
+    }
+    if (other.isSetForeign_db_name()) {
+      this.foreign_db_name = other.foreign_db_name;
+    }
+    if (other.isSetForeign_tbl_name()) {
+      this.foreign_tbl_name = other.foreign_tbl_name;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public ForeignKeysRequest deepCopy() {
+    return new ForeignKeysRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.parent_db_name = null;
+    this.parent_tbl_name = null;
+    this.foreign_db_name = null;
+    this.foreign_tbl_name = null;
+    this.catName = null;
+  }
+
+  public String getParent_db_name() {
+    return this.parent_db_name;
+  }
+
+  public void setParent_db_name(String parent_db_name) {
+    this.parent_db_name = parent_db_name;
+  }
+
+  public void unsetParent_db_name() {
+    this.parent_db_name = null;
+  }
+
+  /** Returns true if field parent_db_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetParent_db_name() {
+    return this.parent_db_name != null;
+  }
+
+  public void setParent_db_nameIsSet(boolean value) {
+    if (!value) {
+      this.parent_db_name = null;
+    }
+  }
+
+  public String getParent_tbl_name() {
+    return this.parent_tbl_name;
+  }
+
+  public void setParent_tbl_name(String parent_tbl_name) {
+    this.parent_tbl_name = parent_tbl_name;
+  }
+
+  public void unsetParent_tbl_name() {
+    this.parent_tbl_name = null;
+  }
+
+  /** Returns true if field parent_tbl_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetParent_tbl_name() {
+    return this.parent_tbl_name != null;
+  }
+
+  public void setParent_tbl_nameIsSet(boolean value) {
+    if (!value) {
+      this.parent_tbl_name = null;
+    }
+  }
+
+  public String getForeign_db_name() {
+    return this.foreign_db_name;
+  }
+
+  public void setForeign_db_name(String foreign_db_name) {
+    this.foreign_db_name = foreign_db_name;
+  }
+
+  public void unsetForeign_db_name() {
+    this.foreign_db_name = null;
+  }
+
+  /** Returns true if field foreign_db_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetForeign_db_name() {
+    return this.foreign_db_name != null;
+  }
+
+  public void setForeign_db_nameIsSet(boolean value) {
+    if (!value) {
+      this.foreign_db_name = null;
+    }
+  }
+
+  public String getForeign_tbl_name() {
+    return this.foreign_tbl_name;
+  }
+
+  public void setForeign_tbl_name(String foreign_tbl_name) {
+    this.foreign_tbl_name = foreign_tbl_name;
+  }
+
+  public void unsetForeign_tbl_name() {
+    this.foreign_tbl_name = null;
+  }
+
+  /** Returns true if field foreign_tbl_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetForeign_tbl_name() {
+    return this.foreign_tbl_name != null;
+  }
+
+  public void setForeign_tbl_nameIsSet(boolean value) {
+    if (!value) {
+      this.foreign_tbl_name = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PARENT_DB_NAME:
+      if (value == null) {
+        unsetParent_db_name();
+      } else {
+        setParent_db_name((String)value);
+      }
+      break;
+
+    case PARENT_TBL_NAME:
+      if (value == null) {
+        unsetParent_tbl_name();
+      } else {
+        setParent_tbl_name((String)value);
+      }
+      break;
+
+    case FOREIGN_DB_NAME:
+      if (value == null) {
+        unsetForeign_db_name();
+      } else {
+        setForeign_db_name((String)value);
+      }
+      break;
+
+    case FOREIGN_TBL_NAME:
+      if (value == null) {
+        unsetForeign_tbl_name();
+      } else {
+        setForeign_tbl_name((String)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PARENT_DB_NAME:
+      return getParent_db_name();
+
+    case PARENT_TBL_NAME:
+      return getParent_tbl_name();
+
+    case FOREIGN_DB_NAME:
+      return getForeign_db_name();
+
+    case FOREIGN_TBL_NAME:
+      return getForeign_tbl_name();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PARENT_DB_NAME:
+      return isSetParent_db_name();
+    case PARENT_TBL_NAME:
+      return isSetParent_tbl_name();
+    case FOREIGN_DB_NAME:
+      return isSetForeign_db_name();
+    case FOREIGN_TBL_NAME:
+      return isSetForeign_tbl_name();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ForeignKeysRequest)
+      return this.equals((ForeignKeysRequest)that);
+    return false;
+  }
+
+  public boolean equals(ForeignKeysRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_parent_db_name = true && this.isSetParent_db_name();
+    boolean that_present_parent_db_name = true && that.isSetParent_db_name();
+    if (this_present_parent_db_name || that_present_parent_db_name) {
+      if (!(this_present_parent_db_name && that_present_parent_db_name))
+        return false;
+      if (!this.parent_db_name.equals(that.parent_db_name))
+        return false;
+    }
+
+    boolean this_present_parent_tbl_name = true && this.isSetParent_tbl_name();
+    boolean that_present_parent_tbl_name = true && that.isSetParent_tbl_name();
+    if (this_present_parent_tbl_name || that_present_parent_tbl_name) {
+      if (!(this_present_parent_tbl_name && that_present_parent_tbl_name))
+        return false;
+      if (!this.parent_tbl_name.equals(that.parent_tbl_name))
+        return false;
+    }
+
+    boolean this_present_foreign_db_name = true && this.isSetForeign_db_name();
+    boolean that_present_foreign_db_name = true && that.isSetForeign_db_name();
+    if (this_present_foreign_db_name || that_present_foreign_db_name) {
+      if (!(this_present_foreign_db_name && that_present_foreign_db_name))
+        return false;
+      if (!this.foreign_db_name.equals(that.foreign_db_name))
+        return false;
+    }
+
+    boolean this_present_foreign_tbl_name = true && this.isSetForeign_tbl_name();
+    boolean that_present_foreign_tbl_name = true && that.isSetForeign_tbl_name();
+    if (this_present_foreign_tbl_name || that_present_foreign_tbl_name) {
+      if (!(this_present_foreign_tbl_name && that_present_foreign_tbl_name))
+        return false;
+      if (!this.foreign_tbl_name.equals(that.foreign_tbl_name))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_parent_db_name = true && (isSetParent_db_name());
+    list.add(present_parent_db_name);
+    if (present_parent_db_name)
+      list.add(parent_db_name);
+
+    boolean present_parent_tbl_name = true && (isSetParent_tbl_name());
+    list.add(present_parent_tbl_name);
+    if (present_parent_tbl_name)
+      list.add(parent_tbl_name);
+
+    boolean present_foreign_db_name = true && (isSetForeign_db_name());
+    list.add(present_foreign_db_name);
+    if (present_foreign_db_name)
+      list.add(foreign_db_name);
+
+    boolean present_foreign_tbl_name = true && (isSetForeign_tbl_name());
+    list.add(present_foreign_tbl_name);
+    if (present_foreign_tbl_name)
+      list.add(foreign_tbl_name);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ForeignKeysRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetParent_db_name()).compareTo(other.isSetParent_db_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetParent_db_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parent_db_name, other.parent_db_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetParent_tbl_name()).compareTo(other.isSetParent_tbl_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetParent_tbl_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parent_tbl_name, other.parent_tbl_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetForeign_db_name()).compareTo(other.isSetForeign_db_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetForeign_db_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreign_db_name, other.foreign_db_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetForeign_tbl_name()).compareTo(other.isSetForeign_tbl_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetForeign_tbl_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreign_tbl_name, other.foreign_tbl_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ForeignKeysRequest(");
+    boolean first = true;
+
+    sb.append("parent_db_name:");
+    if (this.parent_db_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.parent_db_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("parent_tbl_name:");
+    if (this.parent_tbl_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.parent_tbl_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("foreign_db_name:");
+    if (this.foreign_db_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.foreign_db_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("foreign_tbl_name:");
+    if (this.foreign_tbl_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.foreign_tbl_name);
+    }
+    first = false;
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ForeignKeysRequestStandardSchemeFactory implements SchemeFactory {
+    public ForeignKeysRequestStandardScheme getScheme() {
+      return new ForeignKeysRequestStandardScheme();
+    }
+  }
+
+  private static class ForeignKeysRequestStandardScheme extends StandardScheme<ForeignKeysRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ForeignKeysRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PARENT_DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.parent_db_name = iprot.readString();
+              struct.setParent_db_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PARENT_TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.parent_tbl_name = iprot.readString();
+              struct.setParent_tbl_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // FOREIGN_DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.foreign_db_name = iprot.readString();
+              struct.setForeign_db_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // FOREIGN_TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.foreign_tbl_name = iprot.readString();
+              struct.setForeign_tbl_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ForeignKeysRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.parent_db_name != null) {
+        oprot.writeFieldBegin(PARENT_DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.parent_db_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.parent_tbl_name != null) {
+        oprot.writeFieldBegin(PARENT_TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.parent_tbl_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.foreign_db_name != null) {
+        oprot.writeFieldBegin(FOREIGN_DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.foreign_db_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.foreign_tbl_name != null) {
+        oprot.writeFieldBegin(FOREIGN_TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.foreign_tbl_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ForeignKeysRequestTupleSchemeFactory implements SchemeFactory {
+    public ForeignKeysRequestTupleScheme getScheme() {
+      return new ForeignKeysRequestTupleScheme();
+    }
+  }
+
+  private static class ForeignKeysRequestTupleScheme extends TupleScheme<ForeignKeysRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetParent_db_name()) {
+        optionals.set(0);
+      }
+      if (struct.isSetParent_tbl_name()) {
+        optionals.set(1);
+      }
+      if (struct.isSetForeign_db_name()) {
+        optionals.set(2);
+      }
+      if (struct.isSetForeign_tbl_name()) {
+        optionals.set(3);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(4);
+      }
+      oprot.writeBitSet(optionals, 5);
+      if (struct.isSetParent_db_name()) {
+        oprot.writeString(struct.parent_db_name);
+      }
+      if (struct.isSetParent_tbl_name()) {
+        oprot.writeString(struct.parent_tbl_name);
+      }
+      if (struct.isSetForeign_db_name()) {
+        oprot.writeString(struct.foreign_db_name);
+      }
+      if (struct.isSetForeign_tbl_name()) {
+        oprot.writeString(struct.foreign_tbl_name);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ForeignKeysRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(5);
+      if (incoming.get(0)) {
+        struct.parent_db_name = iprot.readString();
+        struct.setParent_db_nameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.parent_tbl_name = iprot.readString();
+        struct.setParent_tbl_nameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.foreign_db_name = iprot.readString();
+        struct.setForeign_db_nameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.foreign_tbl_name = iprot.readString();
+        struct.setForeign_tbl_nameIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+

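[Editor's usage sketch, not part of the patch.] The ForeignKeysRequest struct above, like every struct in this patch, registers two serializers: a StandardScheme that emits tagged fields (skipping unset optionals such as catName) and a TupleScheme that packs a BitSet of set-flags followed by the raw values. A minimal round trip through the standard scheme might look like the following; it assumes only the generated no-arg constructor and per-field setters matching the field names shown above, plus the stock libthrift 0.9.3 TMemoryBuffer and TBinaryProtocol classes.

import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class ForeignKeysRequestRoundTrip {
  public static void main(String[] args) throws Exception {
    // Ask for the foreign keys that reference default.customers.
    ForeignKeysRequest req = new ForeignKeysRequest();
    req.setParent_db_name("default");
    req.setParent_tbl_name("customers");
    req.setCatName("hive"); // optional field: written only when isSetCatName() is true

    // TBinaryProtocol selects the StandardScheme write() path shown above.
    TMemoryBuffer buf = new TMemoryBuffer(1024);
    req.write(new TBinaryProtocol(buf));

    ForeignKeysRequest copy = new ForeignKeysRequest();
    copy.read(new TBinaryProtocol(buf));
    System.out.println(copy); // set fields survive the round trip; unset ones stay null
  }
}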
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java
new file mode 100644
index 0000000..2890506
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeysResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ForeignKeysResponse implements org.apache.thrift.TBase<ForeignKeysResponse, ForeignKeysResponse._Fields>, java.io.Serializable, Cloneable, Comparable<ForeignKeysResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ForeignKeysResponse");
+
+  private static final org.apache.thrift.protocol.TField FOREIGN_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignKeys", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ForeignKeysResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ForeignKeysResponseTupleSchemeFactory());
+  }
+
+  private List<SQLForeignKey> foreignKeys; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FOREIGN_KEYS((short)1, "foreignKeys");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FOREIGN_KEYS
+          return FOREIGN_KEYS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FOREIGN_KEYS, new org.apache.thrift.meta_data.FieldMetaData("foreignKeys", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLForeignKey.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ForeignKeysResponse.class, metaDataMap);
+  }
+
+  public ForeignKeysResponse() {
+  }
+
+  public ForeignKeysResponse(
+    List<SQLForeignKey> foreignKeys)
+  {
+    this();
+    this.foreignKeys = foreignKeys;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ForeignKeysResponse(ForeignKeysResponse other) {
+    if (other.isSetForeignKeys()) {
+      List<SQLForeignKey> __this__foreignKeys = new ArrayList<SQLForeignKey>(other.foreignKeys.size());
+      for (SQLForeignKey other_element : other.foreignKeys) {
+        __this__foreignKeys.add(new SQLForeignKey(other_element));
+      }
+      this.foreignKeys = __this__foreignKeys;
+    }
+  }
+
+  public ForeignKeysResponse deepCopy() {
+    return new ForeignKeysResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.foreignKeys = null;
+  }
+
+  public int getForeignKeysSize() {
+    return (this.foreignKeys == null) ? 0 : this.foreignKeys.size();
+  }
+
+  public java.util.Iterator<SQLForeignKey> getForeignKeysIterator() {
+    return (this.foreignKeys == null) ? null : this.foreignKeys.iterator();
+  }
+
+  public void addToForeignKeys(SQLForeignKey elem) {
+    if (this.foreignKeys == null) {
+      this.foreignKeys = new ArrayList<SQLForeignKey>();
+    }
+    this.foreignKeys.add(elem);
+  }
+
+  public List<SQLForeignKey> getForeignKeys() {
+    return this.foreignKeys;
+  }
+
+  public void setForeignKeys(List<SQLForeignKey> foreignKeys) {
+    this.foreignKeys = foreignKeys;
+  }
+
+  public void unsetForeignKeys() {
+    this.foreignKeys = null;
+  }
+
+  /** Returns true if field foreignKeys is set (has been assigned a value) and false otherwise */
+  public boolean isSetForeignKeys() {
+    return this.foreignKeys != null;
+  }
+
+  public void setForeignKeysIsSet(boolean value) {
+    if (!value) {
+      this.foreignKeys = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FOREIGN_KEYS:
+      if (value == null) {
+        unsetForeignKeys();
+      } else {
+        setForeignKeys((List<SQLForeignKey>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FOREIGN_KEYS:
+      return getForeignKeys();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FOREIGN_KEYS:
+      return isSetForeignKeys();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ForeignKeysResponse)
+      return this.equals((ForeignKeysResponse)that);
+    return false;
+  }
+
+  public boolean equals(ForeignKeysResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_foreignKeys = true && this.isSetForeignKeys();
+    boolean that_present_foreignKeys = true && that.isSetForeignKeys();
+    if (this_present_foreignKeys || that_present_foreignKeys) {
+      if (!(this_present_foreignKeys && that_present_foreignKeys))
+        return false;
+      if (!this.foreignKeys.equals(that.foreignKeys))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_foreignKeys = true && (isSetForeignKeys());
+    list.add(present_foreignKeys);
+    if (present_foreignKeys)
+      list.add(foreignKeys);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ForeignKeysResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetForeignKeys()).compareTo(other.isSetForeignKeys());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetForeignKeys()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignKeys, other.foreignKeys);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ForeignKeysResponse(");
+    boolean first = true;
+
+    sb.append("foreignKeys:");
+    if (this.foreignKeys == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.foreignKeys);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetForeignKeys()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'foreignKeys' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ForeignKeysResponseStandardSchemeFactory implements SchemeFactory {
+    public ForeignKeysResponseStandardScheme getScheme() {
+      return new ForeignKeysResponseStandardScheme();
+    }
+  }
+
+  private static class ForeignKeysResponseStandardScheme extends StandardScheme<ForeignKeysResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ForeignKeysResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FOREIGN_KEYS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list328 = iprot.readListBegin();
+                struct.foreignKeys = new ArrayList<SQLForeignKey>(_list328.size);
+                SQLForeignKey _elem329;
+                for (int _i330 = 0; _i330 < _list328.size; ++_i330)
+                {
+                  _elem329 = new SQLForeignKey();
+                  _elem329.read(iprot);
+                  struct.foreignKeys.add(_elem329);
+                }
+                iprot.readListEnd();
+              }
+              struct.setForeignKeysIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ForeignKeysResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.foreignKeys != null) {
+        oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size()));
+          for (SQLForeignKey _iter331 : struct.foreignKeys)
+          {
+            _iter331.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ForeignKeysResponseTupleSchemeFactory implements SchemeFactory {
+    public ForeignKeysResponseTupleScheme getScheme() {
+      return new ForeignKeysResponseTupleScheme();
+    }
+  }
+
+  private static class ForeignKeysResponseTupleScheme extends TupleScheme<ForeignKeysResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.foreignKeys.size());
+        for (SQLForeignKey _iter332 : struct.foreignKeys)
+        {
+          _iter332.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ForeignKeysResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list333 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.foreignKeys = new ArrayList<SQLForeignKey>(_list333.size);
+        SQLForeignKey _elem334;
+        for (int _i335 = 0; _i335 < _list333.size; ++_i335)
+        {
+          _elem334 = new SQLForeignKey();
+          _elem334.read(iprot);
+          struct.foreignKeys.add(_elem334);
+        }
+      }
+      struct.setForeignKeysIsSet(true);
+    }
+  }
+
+}
+

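[Editor's usage sketch, not part of the patch.] ForeignKeysResponse is the matching result wrapper: its single foreignKeys list is marked REQUIRED in metaDataMap, so validate() throws a TProtocolException whenever the list has never been assigned. A short sketch under that assumption follows; the SQLForeignKey no-arg constructor is inferred from the generated pattern and is not shown in this hunk.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hive.metastore.api.ForeignKeysResponse;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;

public class ForeignKeysResponseDemo {
  public static void main(String[] args) throws Exception {
    ForeignKeysResponse resp = new ForeignKeysResponse();
    // resp.validate(); // would throw here: required field 'foreignKeys' is unset

    List<SQLForeignKey> keys = new ArrayList<SQLForeignKey>();
    keys.add(new SQLForeignKey()); // a real caller would populate the key columns
    resp.setForeignKeys(keys);
    resp.validate(); // passes once the required list is assigned

    for (SQLForeignKey fk : resp.getForeignKeys()) {
      System.out.println(fk);
    }
  }
}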
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
new file mode 100644
index 0000000..a1c0de9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java
@@ -0,0 +1,1306 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Function implements org.apache.thrift.TBase<Function, Function._Fields>, java.io.Serializable, Cloneable, Comparable<Function> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Function");
+
+  private static final org.apache.thrift.protocol.TField FUNCTION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("functionName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField CLASS_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("className", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField OWNER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerName", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)6);
+  private static final org.apache.thrift.protocol.TField FUNCTION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("functionType", org.apache.thrift.protocol.TType.I32, (short)7);
+  private static final org.apache.thrift.protocol.TField RESOURCE_URIS_FIELD_DESC = new org.apache.thrift.protocol.TField("resourceUris", org.apache.thrift.protocol.TType.LIST, (short)8);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new FunctionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new FunctionTupleSchemeFactory());
+  }
+
+  private String functionName; // required
+  private String dbName; // required
+  private String className; // required
+  private String ownerName; // required
+  private PrincipalType ownerType; // required
+  private int createTime; // required
+  private FunctionType functionType; // required
+  private List<ResourceUri> resourceUris; // required
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FUNCTION_NAME((short)1, "functionName"),
+    DB_NAME((short)2, "dbName"),
+    CLASS_NAME((short)3, "className"),
+    OWNER_NAME((short)4, "ownerName"),
+    /**
+     * 
+     * @see PrincipalType
+     */
+    OWNER_TYPE((short)5, "ownerType"),
+    CREATE_TIME((short)6, "createTime"),
+    /**
+     * 
+     * @see FunctionType
+     */
+    FUNCTION_TYPE((short)7, "functionType"),
+    RESOURCE_URIS((short)8, "resourceUris"),
+    CAT_NAME((short)9, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FUNCTION_NAME
+          return FUNCTION_NAME;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // CLASS_NAME
+          return CLASS_NAME;
+        case 4: // OWNER_NAME
+          return OWNER_NAME;
+        case 5: // OWNER_TYPE
+          return OWNER_TYPE;
+        case 6: // CREATE_TIME
+          return CREATE_TIME;
+        case 7: // FUNCTION_TYPE
+          return FUNCTION_TYPE;
+        case 8: // RESOURCE_URIS
+          return RESOURCE_URIS;
+        case 9: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __CREATETIME_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FUNCTION_NAME, new org.apache.thrift.meta_data.FieldMetaData("functionName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CLASS_NAME, new org.apache.thrift.meta_data.FieldMetaData("className", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.OWNER_NAME, new org.apache.thrift.meta_data.FieldMetaData("ownerName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.OWNER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("ownerType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+    tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.FUNCTION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("functionType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, FunctionType.class)));
+    tmpMap.put(_Fields.RESOURCE_URIS, new org.apache.thrift.meta_data.FieldMetaData("resourceUris", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ResourceUri.class))));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Function.class, metaDataMap);
+  }
+
+  public Function() {
+  }
+
+  public Function(
+    String functionName,
+    String dbName,
+    String className,
+    String ownerName,
+    PrincipalType ownerType,
+    int createTime,
+    FunctionType functionType,
+    List<ResourceUri> resourceUris)
+  {
+    this();
+    this.functionName = functionName;
+    this.dbName = dbName;
+    this.className = className;
+    this.ownerName = ownerName;
+    this.ownerType = ownerType;
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+    this.functionType = functionType;
+    this.resourceUris = resourceUris;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public Function(Function other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetFunctionName()) {
+      this.functionName = other.functionName;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetClassName()) {
+      this.className = other.className;
+    }
+    if (other.isSetOwnerName()) {
+      this.ownerName = other.ownerName;
+    }
+    if (other.isSetOwnerType()) {
+      this.ownerType = other.ownerType;
+    }
+    this.createTime = other.createTime;
+    if (other.isSetFunctionType()) {
+      this.functionType = other.functionType;
+    }
+    if (other.isSetResourceUris()) {
+      List<ResourceUri> __this__resourceUris = new ArrayList<ResourceUri>(other.resourceUris.size());
+      for (ResourceUri other_element : other.resourceUris) {
+        __this__resourceUris.add(new ResourceUri(other_element));
+      }
+      this.resourceUris = __this__resourceUris;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public Function deepCopy() {
+    return new Function(this);
+  }
+
+  @Override
+  public void clear() {
+    this.functionName = null;
+    this.dbName = null;
+    this.className = null;
+    this.ownerName = null;
+    this.ownerType = null;
+    setCreateTimeIsSet(false);
+    this.createTime = 0;
+    this.functionType = null;
+    this.resourceUris = null;
+    this.catName = null;
+  }
+
+  public String getFunctionName() {
+    return this.functionName;
+  }
+
+  public void setFunctionName(String functionName) {
+    this.functionName = functionName;
+  }
+
+  public void unsetFunctionName() {
+    this.functionName = null;
+  }
+
+  /** Returns true if field functionName is set (has been assigned a value) and false otherwise */
+  public boolean isSetFunctionName() {
+    return this.functionName != null;
+  }
+
+  public void setFunctionNameIsSet(boolean value) {
+    if (!value) {
+      this.functionName = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getClassName() {
+    return this.className;
+  }
+
+  public void setClassName(String className) {
+    this.className = className;
+  }
+
+  public void unsetClassName() {
+    this.className = null;
+  }
+
+  /** Returns true if field className is set (has been assigned a value) and false otherwise */
+  public boolean isSetClassName() {
+    return this.className != null;
+  }
+
+  public void setClassNameIsSet(boolean value) {
+    if (!value) {
+      this.className = null;
+    }
+  }
+
+  public String getOwnerName() {
+    return this.ownerName;
+  }
+
+  public void setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+  }
+
+  public void unsetOwnerName() {
+    this.ownerName = null;
+  }
+
+  /** Returns true if field ownerName is set (has been assigned a value) and false otherwise */
+  public boolean isSetOwnerName() {
+    return this.ownerName != null;
+  }
+
+  public void setOwnerNameIsSet(boolean value) {
+    if (!value) {
+      this.ownerName = null;
+    }
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public PrincipalType getOwnerType() {
+    return this.ownerType;
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public void setOwnerType(PrincipalType ownerType) {
+    this.ownerType = ownerType;
+  }
+
+  public void unsetOwnerType() {
+    this.ownerType = null;
+  }
+
+  /** Returns true if field ownerType is set (has been assigned a value) and false otherwise */
+  public boolean isSetOwnerType() {
+    return this.ownerType != null;
+  }
+
+  public void setOwnerTypeIsSet(boolean value) {
+    if (!value) {
+      this.ownerType = null;
+    }
+  }
+
+  public int getCreateTime() {
+    return this.createTime;
+  }
+
+  public void setCreateTime(int createTime) {
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+  }
+
+  public void unsetCreateTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  /** Returns true if field createTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetCreateTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  public void setCreateTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
+  }
+
+  /**
+   * 
+   * @see FunctionType
+   */
+  public FunctionType getFunctionType() {
+    return this.functionType;
+  }
+
+  /**
+   * 
+   * @see FunctionType
+   */
+  public void setFunctionType(FunctionType functionType) {
+    this.functionType = functionType;
+  }
+
+  public void unsetFunctionType() {
+    this.functionType = null;
+  }
+
+  /** Returns true if field functionType is set (has been assigned a value) and false otherwise */
+  public boolean isSetFunctionType() {
+    return this.functionType != null;
+  }
+
+  public void setFunctionTypeIsSet(boolean value) {
+    if (!value) {
+      this.functionType = null;
+    }
+  }
+
+  public int getResourceUrisSize() {
+    return (this.resourceUris == null) ? 0 : this.resourceUris.size();
+  }
+
+  public java.util.Iterator<ResourceUri> getResourceUrisIterator() {
+    return (this.resourceUris == null) ? null : this.resourceUris.iterator();
+  }
+
+  public void addToResourceUris(ResourceUri elem) {
+    if (this.resourceUris == null) {
+      this.resourceUris = new ArrayList<ResourceUri>();
+    }
+    this.resourceUris.add(elem);
+  }
+
+  public List<ResourceUri> getResourceUris() {
+    return this.resourceUris;
+  }
+
+  public void setResourceUris(List<ResourceUri> resourceUris) {
+    this.resourceUris = resourceUris;
+  }
+
+  public void unsetResourceUris() {
+    this.resourceUris = null;
+  }
+
+  /** Returns true if field resourceUris is set (has been assigned a value) and false otherwise */
+  public boolean isSetResourceUris() {
+    return this.resourceUris != null;
+  }
+
+  public void setResourceUrisIsSet(boolean value) {
+    if (!value) {
+      this.resourceUris = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FUNCTION_NAME:
+      if (value == null) {
+        unsetFunctionName();
+      } else {
+        setFunctionName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case CLASS_NAME:
+      if (value == null) {
+        unsetClassName();
+      } else {
+        setClassName((String)value);
+      }
+      break;
+
+    case OWNER_NAME:
+      if (value == null) {
+        unsetOwnerName();
+      } else {
+        setOwnerName((String)value);
+      }
+      break;
+
+    case OWNER_TYPE:
+      if (value == null) {
+        unsetOwnerType();
+      } else {
+        setOwnerType((PrincipalType)value);
+      }
+      break;
+
+    case CREATE_TIME:
+      if (value == null) {
+        unsetCreateTime();
+      } else {
+        setCreateTime((Integer)value);
+      }
+      break;
+
+    case FUNCTION_TYPE:
+      if (value == null) {
+        unsetFunctionType();
+      } else {
+        setFunctionType((FunctionType)value);
+      }
+      break;
+
+    case RESOURCE_URIS:
+      if (value == null) {
+        unsetResourceUris();
+      } else {
+        setResourceUris((List<ResourceUri>)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FUNCTION_NAME:
+      return getFunctionName();
+
+    case DB_NAME:
+      return getDbName();
+
+    case CLASS_NAME:
+      return getClassName();
+
+    case OWNER_NAME:
+      return getOwnerName();
+
+    case OWNER_TYPE:
+      return getOwnerType();
+
+    case CREATE_TIME:
+      return getCreateTime();
+
+    case FUNCTION_TYPE:
+      return getFunctionType();
+
+    case RESOURCE_URIS:
+      return getResourceUris();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FUNCTION_NAME:
+      return isSetFunctionName();
+    case DB_NAME:
+      return isSetDbName();
+    case CLASS_NAME:
+      return isSetClassName();
+    case OWNER_NAME:
+      return isSetOwnerName();
+    case OWNER_TYPE:
+      return isSetOwnerType();
+    case CREATE_TIME:
+      return isSetCreateTime();
+    case FUNCTION_TYPE:
+      return isSetFunctionType();
+    case RESOURCE_URIS:
+      return isSetResourceUris();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof Function)
+      return this.equals((Function)that);
+    return false;
+  }
+
+  public boolean equals(Function that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_functionName = true && this.isSetFunctionName();
+    boolean that_present_functionName = true && that.isSetFunctionName();
+    if (this_present_functionName || that_present_functionName) {
+      if (!(this_present_functionName && that_present_functionName))
+        return false;
+      if (!this.functionName.equals(that.functionName))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_className = true && this.isSetClassName();
+    boolean that_present_className = true && that.isSetClassName();
+    if (this_present_className || that_present_className) {
+      if (!(this_present_className && that_present_className))
+        return false;
+      if (!this.className.equals(that.className))
+        return false;
+    }
+
+    boolean this_present_ownerName = true && this.isSetOwnerName();
+    boolean that_present_ownerName = true && that.isSetOwnerName();
+    if (this_present_ownerName || that_present_ownerName) {
+      if (!(this_present_ownerName && that_present_ownerName))
+        return false;
+      if (!this.ownerName.equals(that.ownerName))
+        return false;
+    }
+
+    boolean this_present_ownerType = true && this.isSetOwnerType();
+    boolean that_present_ownerType = true && that.isSetOwnerType();
+    if (this_present_ownerType || that_present_ownerType) {
+      if (!(this_present_ownerType && that_present_ownerType))
+        return false;
+      if (!this.ownerType.equals(that.ownerType))
+        return false;
+    }
+
+    boolean this_present_createTime = true;
+    boolean that_present_createTime = true;
+    if (this_present_createTime || that_present_createTime) {
+      if (!(this_present_createTime && that_present_createTime))
+        return false;
+      if (this.createTime != that.createTime)
+        return false;
+    }
+
+    boolean this_present_functionType = true && this.isSetFunctionType();
+    boolean that_present_functionType = true && that.isSetFunctionType();
+    if (this_present_functionType || that_present_functionType) {
+      if (!(this_present_functionType && that_present_functionType))
+        return false;
+      if (!this.functionType.equals(that.functionType))
+        return false;
+    }
+
+    boolean this_present_resourceUris = true && this.isSetResourceUris();
+    boolean that_present_resourceUris = true && that.isSetResourceUris();
+    if (this_present_resourceUris || that_present_resourceUris) {
+      if (!(this_present_resourceUris && that_present_resourceUris))
+        return false;
+      if (!this.resourceUris.equals(that.resourceUris))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_functionName = true && (isSetFunctionName());
+    list.add(present_functionName);
+    if (present_functionName)
+      list.add(functionName);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_className = true && (isSetClassName());
+    list.add(present_className);
+    if (present_className)
+      list.add(className);
+
+    boolean present_ownerName = true && (isSetOwnerName());
+    list.add(present_ownerName);
+    if (present_ownerName)
+      list.add(ownerName);
+
+    boolean present_ownerType = true && (isSetOwnerType());
+    list.add(present_ownerType);
+    if (present_ownerType)
+      list.add(ownerType.getValue());
+
+    boolean present_createTime = true;
+    list.add(present_createTime);
+    if (present_createTime)
+      list.add(createTime);
+
+    boolean present_functionType = true && (isSetFunctionType());
+    list.add(present_functionType);
+    if (present_functionType)
+      list.add(functionType.getValue());
+
+    boolean present_resourceUris = true && (isSetResourceUris());
+    list.add(present_resourceUris);
+    if (present_resourceUris)
+      list.add(resourceUris);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(Function other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetFunctionName()).compareTo(other.isSetFunctionName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFunctionName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.functionName, other.functionName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetClassName()).compareTo(other.isSetClassName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetClassName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.className, other.className);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOwnerName()).compareTo(other.isSetOwnerName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOwnerName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ownerName, other.ownerName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOwnerType()).compareTo(other.isSetOwnerType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOwnerType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ownerType, other.ownerType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCreateTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFunctionType()).compareTo(other.isSetFunctionType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFunctionType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.functionType, other.functionType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetResourceUris()).compareTo(other.isSetResourceUris());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetResourceUris()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourceUris, other.resourceUris);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Function(");
+    boolean first = true;
+
+    sb.append("functionName:");
+    if (this.functionName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.functionName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("className:");
+    if (this.className == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.className);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("ownerName:");
+    if (this.ownerName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.ownerName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("ownerType:");
+    if (this.ownerType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.ownerType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("createTime:");
+    sb.append(this.createTime);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("functionType:");
+    if (this.functionType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.functionType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("resourceUris:");
+    if (this.resourceUris == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.resourceUris);
+    }
+    first = false;
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization bypasses the default constructor, so the isset bitfield must be reset by hand before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class FunctionStandardSchemeFactory implements SchemeFactory {
+    public FunctionStandardScheme getScheme() {
+      return new FunctionStandardScheme();
+    }
+  }
+
+  private static class FunctionStandardScheme extends StandardScheme<Function> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Function struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FUNCTION_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.functionName = iprot.readString();
+              struct.setFunctionNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // CLASS_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.className = iprot.readString();
+              struct.setClassNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // OWNER_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.ownerName = iprot.readString();
+              struct.setOwnerNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // OWNER_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+              struct.setOwnerTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // CREATE_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.createTime = iprot.readI32();
+              struct.setCreateTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // FUNCTION_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.functionType = org.apache.hadoop.hive.metastore.api.FunctionType.findByValue(iprot.readI32());
+              struct.setFunctionTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // RESOURCE_URIS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list546 = iprot.readListBegin();
+                struct.resourceUris = new ArrayList<ResourceUri>(_list546.size);
+                ResourceUri _elem547;
+                for (int _i548 = 0; _i548 < _list546.size; ++_i548)
+                {
+                  _elem547 = new ResourceUri();
+                  _elem547.read(iprot);
+                  struct.resourceUris.add(_elem547);
+                }
+                iprot.readListEnd();
+              }
+              struct.setResourceUrisIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Function struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.functionName != null) {
+        oprot.writeFieldBegin(FUNCTION_NAME_FIELD_DESC);
+        oprot.writeString(struct.functionName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.className != null) {
+        oprot.writeFieldBegin(CLASS_NAME_FIELD_DESC);
+        oprot.writeString(struct.className);
+        oprot.writeFieldEnd();
+      }
+      if (struct.ownerName != null) {
+        oprot.writeFieldBegin(OWNER_NAME_FIELD_DESC);
+        oprot.writeString(struct.ownerName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.ownerType != null) {
+        oprot.writeFieldBegin(OWNER_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.ownerType.getValue());
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
+      oprot.writeI32(struct.createTime);
+      oprot.writeFieldEnd();
+      if (struct.functionType != null) {
+        oprot.writeFieldBegin(FUNCTION_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.functionType.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.resourceUris != null) {
+        oprot.writeFieldBegin(RESOURCE_URIS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourceUris.size()));
+          for (ResourceUri _iter549 : struct.resourceUris)
+          {
+            _iter549.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class FunctionTupleSchemeFactory implements SchemeFactory {
+    public FunctionTupleScheme getScheme() {
+      return new FunctionTupleScheme();
+    }
+  }
+
+  private static class FunctionTupleScheme extends TupleScheme<Function> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetFunctionName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetDbName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetClassName()) {
+        optionals.set(2);
+      }
+      if (struct.isSetOwnerName()) {
+        optionals.set(3);
+      }
+      if (struct.isSetOwnerType()) {
+        optionals.set(4);
+      }
+      if (struct.isSetCreateTime()) {
+        optionals.set(5);
+      }
+      if (struct.isSetFunctionType()) {
+        optionals.set(6);
+      }
+      if (struct.isSetResourceUris()) {
+        optionals.set(7);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(8);
+      }
+      oprot.writeBitSet(optionals, 9);
+      if (struct.isSetFunctionName()) {
+        oprot.writeString(struct.functionName);
+      }
+      if (struct.isSetDbName()) {
+        oprot.writeString(struct.dbName);
+      }
+      if (struct.isSetClassName()) {
+        oprot.writeString(struct.className);
+      }
+      if (struct.isSetOwnerName()) {
+        oprot.writeString(struct.ownerName);
+      }
+      if (struct.isSetOwnerType()) {
+        oprot.writeI32(struct.ownerType.getValue());
+      }
+      if (struct.isSetCreateTime()) {
+        oprot.writeI32(struct.createTime);
+      }
+      if (struct.isSetFunctionType()) {
+        oprot.writeI32(struct.functionType.getValue());
+      }
+      if (struct.isSetResourceUris()) {
+        {
+          oprot.writeI32(struct.resourceUris.size());
+          for (ResourceUri _iter550 : struct.resourceUris)
+          {
+            _iter550.write(oprot);
+          }
+        }
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(9);
+      if (incoming.get(0)) {
+        struct.functionName = iprot.readString();
+        struct.setFunctionNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.dbName = iprot.readString();
+        struct.setDbNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.className = iprot.readString();
+        struct.setClassNameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.ownerName = iprot.readString();
+        struct.setOwnerNameIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+        struct.setOwnerTypeIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.createTime = iprot.readI32();
+        struct.setCreateTimeIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.functionType = org.apache.hadoop.hive.metastore.api.FunctionType.findByValue(iprot.readI32());
+        struct.setFunctionTypeIsSet(true);
+      }
+      if (incoming.get(7)) {
+        {
+          org.apache.thrift.protocol.TList _list551 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.resourceUris = new ArrayList<ResourceUri>(_list551.size);
+          ResourceUri _elem552;
+          for (int _i553 = 0; _i553 < _list551.size; ++_i553)
+          {
+            _elem552 = new ResourceUri();
+            _elem552.read(iprot);
+            struct.resourceUris.add(_elem552);
+          }
+        }
+        struct.setResourceUrisIsSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
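
For context, the generated Function class above exposes two serialization paths: the StandardScheme (tag-based, used by TBinaryProtocol and TCompactProtocol) and the more compact TupleScheme (bitset-based, used by TTupleProtocol). Below is a minimal round-trip sketch through the compact protocol; it is not part of this commit, and the field values are illustrative assumptions only.

import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.api.FunctionType;
import org.apache.hadoop.hive.metastore.api.PrincipalType;

public class FunctionRoundTrip {
  public static void main(String[] args) throws TException {
    Function fn = new Function();
    fn.setFunctionName("my_udf");                  // illustrative values only
    fn.setDbName("default");
    fn.setClassName("com.example.MyUdf");
    fn.setOwnerName("hive");
    fn.setOwnerType(PrincipalType.USER);
    fn.setCreateTime((int) (System.currentTimeMillis() / 1000L));
    fn.setFunctionType(FunctionType.JAVA);

    // TCompactProtocol selects the StandardScheme generated above.
    byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(fn);
    Function copy = new Function();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
    System.out.println(fn.equals(copy));           // true: all set fields survive
  }
}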


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
new file mode 100644
index 0000000..247fdaa
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
@@ -0,0 +1,932 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionSpec implements org.apache.thrift.TBase<PartitionSpec, PartitionSpec._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionSpec> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionSpec");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField ROOT_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("rootPath", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField SHARED_SDPARTITION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sharedSDPartitionSpec", org.apache.thrift.protocol.TType.STRUCT, (short)4);
+  private static final org.apache.thrift.protocol.TField PARTITION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionList", org.apache.thrift.protocol.TType.STRUCT, (short)5);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionSpecStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionSpecTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tableName; // required
+  private String rootPath; // required
+  private PartitionSpecWithSharedSD sharedSDPartitionSpec; // optional
+  private PartitionListComposingSpec partitionList; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TABLE_NAME((short)2, "tableName"),
+    ROOT_PATH((short)3, "rootPath"),
+    SHARED_SDPARTITION_SPEC((short)4, "sharedSDPartitionSpec"),
+    PARTITION_LIST((short)5, "partitionList"),
+    CAT_NAME((short)6, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TABLE_NAME
+          return TABLE_NAME;
+        case 3: // ROOT_PATH
+          return ROOT_PATH;
+        case 4: // SHARED_SDPARTITION_SPEC
+          return SHARED_SDPARTITION_SPEC;
+        case 5: // PARTITION_LIST
+          return PARTITION_LIST;
+        case 6: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ROOT_PATH, new org.apache.thrift.meta_data.FieldMetaData("rootPath", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.SHARED_SDPARTITION_SPEC, new org.apache.thrift.meta_data.FieldMetaData("sharedSDPartitionSpec", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionSpecWithSharedSD.class)));
+    tmpMap.put(_Fields.PARTITION_LIST, new org.apache.thrift.meta_data.FieldMetaData("partitionList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionListComposingSpec.class)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionSpec.class, metaDataMap);
+  }
+
+  public PartitionSpec() {
+  }
+
+  public PartitionSpec(
+    String dbName,
+    String tableName,
+    String rootPath)
+  {
+    this();
+    this.dbName = dbName;
+    this.tableName = tableName;
+    this.rootPath = rootPath;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionSpec(PartitionSpec other) {
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTableName()) {
+      this.tableName = other.tableName;
+    }
+    if (other.isSetRootPath()) {
+      this.rootPath = other.rootPath;
+    }
+    if (other.isSetSharedSDPartitionSpec()) {
+      this.sharedSDPartitionSpec = new PartitionSpecWithSharedSD(other.sharedSDPartitionSpec);
+    }
+    if (other.isSetPartitionList()) {
+      this.partitionList = new PartitionListComposingSpec(other.partitionList);
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public PartitionSpec deepCopy() {
+    return new PartitionSpec(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tableName = null;
+    this.rootPath = null;
+    this.sharedSDPartitionSpec = null;
+    this.partitionList = null;
+    this.catName = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  public String getRootPath() {
+    return this.rootPath;
+  }
+
+  public void setRootPath(String rootPath) {
+    this.rootPath = rootPath;
+  }
+
+  public void unsetRootPath() {
+    this.rootPath = null;
+  }
+
+  /** Returns true if field rootPath is set (has been assigned a value) and false otherwise */
+  public boolean isSetRootPath() {
+    return this.rootPath != null;
+  }
+
+  public void setRootPathIsSet(boolean value) {
+    if (!value) {
+      this.rootPath = null;
+    }
+  }
+
+  public PartitionSpecWithSharedSD getSharedSDPartitionSpec() {
+    return this.sharedSDPartitionSpec;
+  }
+
+  public void setSharedSDPartitionSpec(PartitionSpecWithSharedSD sharedSDPartitionSpec) {
+    this.sharedSDPartitionSpec = sharedSDPartitionSpec;
+  }
+
+  public void unsetSharedSDPartitionSpec() {
+    this.sharedSDPartitionSpec = null;
+  }
+
+  /** Returns true if field sharedSDPartitionSpec is set (has been assigned a value) and false otherwise */
+  public boolean isSetSharedSDPartitionSpec() {
+    return this.sharedSDPartitionSpec != null;
+  }
+
+  public void setSharedSDPartitionSpecIsSet(boolean value) {
+    if (!value) {
+      this.sharedSDPartitionSpec = null;
+    }
+  }
+
+  public PartitionListComposingSpec getPartitionList() {
+    return this.partitionList;
+  }
+
+  public void setPartitionList(PartitionListComposingSpec partitionList) {
+    this.partitionList = partitionList;
+  }
+
+  public void unsetPartitionList() {
+    this.partitionList = null;
+  }
+
+  /** Returns true if field partitionList is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitionList() {
+    return this.partitionList != null;
+  }
+
+  public void setPartitionListIsSet(boolean value) {
+    if (!value) {
+      this.partitionList = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case ROOT_PATH:
+      if (value == null) {
+        unsetRootPath();
+      } else {
+        setRootPath((String)value);
+      }
+      break;
+
+    case SHARED_SDPARTITION_SPEC:
+      if (value == null) {
+        unsetSharedSDPartitionSpec();
+      } else {
+        setSharedSDPartitionSpec((PartitionSpecWithSharedSD)value);
+      }
+      break;
+
+    case PARTITION_LIST:
+      if (value == null) {
+        unsetPartitionList();
+      } else {
+        setPartitionList((PartitionListComposingSpec)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case ROOT_PATH:
+      return getRootPath();
+
+    case SHARED_SDPARTITION_SPEC:
+      return getSharedSDPartitionSpec();
+
+    case PARTITION_LIST:
+      return getPartitionList();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TABLE_NAME:
+      return isSetTableName();
+    case ROOT_PATH:
+      return isSetRootPath();
+    case SHARED_SDPARTITION_SPEC:
+      return isSetSharedSDPartitionSpec();
+    case PARTITION_LIST:
+      return isSetPartitionList();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionSpec)
+      return this.equals((PartitionSpec)that);
+    return false;
+  }
+
+  public boolean equals(PartitionSpec that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_rootPath = true && this.isSetRootPath();
+    boolean that_present_rootPath = true && that.isSetRootPath();
+    if (this_present_rootPath || that_present_rootPath) {
+      if (!(this_present_rootPath && that_present_rootPath))
+        return false;
+      if (!this.rootPath.equals(that.rootPath))
+        return false;
+    }
+
+    boolean this_present_sharedSDPartitionSpec = true && this.isSetSharedSDPartitionSpec();
+    boolean that_present_sharedSDPartitionSpec = true && that.isSetSharedSDPartitionSpec();
+    if (this_present_sharedSDPartitionSpec || that_present_sharedSDPartitionSpec) {
+      if (!(this_present_sharedSDPartitionSpec && that_present_sharedSDPartitionSpec))
+        return false;
+      if (!this.sharedSDPartitionSpec.equals(that.sharedSDPartitionSpec))
+        return false;
+    }
+
+    boolean this_present_partitionList = true && this.isSetPartitionList();
+    boolean that_present_partitionList = true && that.isSetPartitionList();
+    if (this_present_partitionList || that_present_partitionList) {
+      if (!(this_present_partitionList && that_present_partitionList))
+        return false;
+      if (!this.partitionList.equals(that.partitionList))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tableName = true && (isSetTableName());
+    list.add(present_tableName);
+    if (present_tableName)
+      list.add(tableName);
+
+    boolean present_rootPath = true && (isSetRootPath());
+    list.add(present_rootPath);
+    if (present_rootPath)
+      list.add(rootPath);
+
+    boolean present_sharedSDPartitionSpec = true && (isSetSharedSDPartitionSpec());
+    list.add(present_sharedSDPartitionSpec);
+    if (present_sharedSDPartitionSpec)
+      list.add(sharedSDPartitionSpec);
+
+    boolean present_partitionList = true && (isSetPartitionList());
+    list.add(present_partitionList);
+    if (present_partitionList)
+      list.add(partitionList);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionSpec other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRootPath()).compareTo(other.isSetRootPath());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRootPath()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rootPath, other.rootPath);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSharedSDPartitionSpec()).compareTo(other.isSetSharedSDPartitionSpec());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSharedSDPartitionSpec()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sharedSDPartitionSpec, other.sharedSDPartitionSpec);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartitionList()).compareTo(other.isSetPartitionList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitionList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionList, other.partitionList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionSpec(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tableName:");
+    if (this.tableName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("rootPath:");
+    if (this.rootPath == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.rootPath);
+    }
+    first = false;
+    if (isSetSharedSDPartitionSpec()) {
+      if (!first) sb.append(", ");
+      sb.append("sharedSDPartitionSpec:");
+      if (this.sharedSDPartitionSpec == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.sharedSDPartitionSpec);
+      }
+      first = false;
+    }
+    if (isSetPartitionList()) {
+      if (!first) sb.append(", ");
+      sb.append("partitionList:");
+      if (this.partitionList == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partitionList);
+      }
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (sharedSDPartitionSpec != null) {
+      sharedSDPartitionSpec.validate();
+    }
+    if (partitionList != null) {
+      partitionList.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionSpecStandardSchemeFactory implements SchemeFactory {
+    public PartitionSpecStandardScheme getScheme() {
+      return new PartitionSpecStandardScheme();
+    }
+  }
+
+  private static class PartitionSpecStandardScheme extends StandardScheme<PartitionSpec> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpec struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = iprot.readString();
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // ROOT_PATH
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.rootPath = iprot.readString();
+              struct.setRootPathIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // SHARED_SDPARTITION_SPEC
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.sharedSDPartitionSpec = new PartitionSpecWithSharedSD();
+              struct.sharedSDPartitionSpec.read(iprot);
+              struct.setSharedSDPartitionSpecIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // PARTITION_LIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.partitionList = new PartitionListComposingSpec();
+              struct.partitionList.read(iprot);
+              struct.setPartitionListIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpec struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tableName != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.tableName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.rootPath != null) {
+        oprot.writeFieldBegin(ROOT_PATH_FIELD_DESC);
+        oprot.writeString(struct.rootPath);
+        oprot.writeFieldEnd();
+      }
+      if (struct.sharedSDPartitionSpec != null) {
+        if (struct.isSetSharedSDPartitionSpec()) {
+          oprot.writeFieldBegin(SHARED_SDPARTITION_SPEC_FIELD_DESC);
+          struct.sharedSDPartitionSpec.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.partitionList != null) {
+        if (struct.isSetPartitionList()) {
+          oprot.writeFieldBegin(PARTITION_LIST_FIELD_DESC);
+          struct.partitionList.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionSpecTupleSchemeFactory implements SchemeFactory {
+    public PartitionSpecTupleScheme getScheme() {
+      return new PartitionSpecTupleScheme();
+    }
+  }
+
+  private static class PartitionSpecTupleScheme extends TupleScheme<PartitionSpec> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetDbName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTableName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetRootPath()) {
+        optionals.set(2);
+      }
+      if (struct.isSetSharedSDPartitionSpec()) {
+        optionals.set(3);
+      }
+      if (struct.isSetPartitionList()) {
+        optionals.set(4);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(5);
+      }
+      oprot.writeBitSet(optionals, 6);
+      if (struct.isSetDbName()) {
+        oprot.writeString(struct.dbName);
+      }
+      if (struct.isSetTableName()) {
+        oprot.writeString(struct.tableName);
+      }
+      if (struct.isSetRootPath()) {
+        oprot.writeString(struct.rootPath);
+      }
+      if (struct.isSetSharedSDPartitionSpec()) {
+        struct.sharedSDPartitionSpec.write(oprot);
+      }
+      if (struct.isSetPartitionList()) {
+        struct.partitionList.write(oprot);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(6);
+      if (incoming.get(0)) {
+        struct.dbName = iprot.readString();
+        struct.setDbNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.tableName = iprot.readString();
+        struct.setTableNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.rootPath = iprot.readString();
+        struct.setRootPathIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.sharedSDPartitionSpec = new PartitionSpecWithSharedSD();
+        struct.sharedSDPartitionSpec.read(iprot);
+        struct.setSharedSDPartitionSpecIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.partitionList = new PartitionListComposingSpec();
+        struct.partitionList.read(iprot);
+        struct.setPartitionListIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
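
The PartitionSpec struct above carries three required fields (dbName, tableName, rootPath) and three optional ones; as the standard-scheme writer shows, an optional field is only emitted when it is both non-null and isSet. A short usage sketch follows (not part of this commit; the values are hypothetical).

import org.apache.hadoop.hive.metastore.api.PartitionSpec;

public class PartitionSpecExample {
  public static void main(String[] args) {
    // Required fields go through the generated three-argument constructor.
    PartitionSpec spec = new PartitionSpec("default", "web_logs", "/warehouse/web_logs");
    // catName is optional (see the isSetCatName() guard in the writer above);
    // it participates in serialization, equals, and toString only once assigned.
    System.out.println(spec.isSetCatName()); // false
    spec.setCatName("hive");
    System.out.println(spec);                // catName now appears in toString()
  }
}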

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
new file mode 100644
index 0000000..a450cd4
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
@@ -0,0 +1,558 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionSpecWithSharedSD implements org.apache.thrift.TBase<PartitionSpecWithSharedSD, PartitionSpecWithSharedSD._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionSpecWithSharedSD> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionSpecWithSharedSD");
+
+  private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField SD_FIELD_DESC = new org.apache.thrift.protocol.TField("sd", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionSpecWithSharedSDStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionSpecWithSharedSDTupleSchemeFactory());
+  }
+
+  private List<PartitionWithoutSD> partitions; // required
+  private StorageDescriptor sd; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PARTITIONS((short)1, "partitions"),
+    SD((short)2, "sd");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PARTITIONS
+          return PARTITIONS;
+        case 2: // SD
+          return SD;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionWithoutSD.class))));
+    tmpMap.put(_Fields.SD, new org.apache.thrift.meta_data.FieldMetaData("sd", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StorageDescriptor.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionSpecWithSharedSD.class, metaDataMap);
+  }
+
+  public PartitionSpecWithSharedSD() {
+  }
+
+  public PartitionSpecWithSharedSD(
+    List<PartitionWithoutSD> partitions,
+    StorageDescriptor sd)
+  {
+    this();
+    this.partitions = partitions;
+    this.sd = sd;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionSpecWithSharedSD(PartitionSpecWithSharedSD other) {
+    if (other.isSetPartitions()) {
+      List<PartitionWithoutSD> __this__partitions = new ArrayList<PartitionWithoutSD>(other.partitions.size());
+      for (PartitionWithoutSD other_element : other.partitions) {
+        __this__partitions.add(new PartitionWithoutSD(other_element));
+      }
+      this.partitions = __this__partitions;
+    }
+    if (other.isSetSd()) {
+      this.sd = new StorageDescriptor(other.sd);
+    }
+  }
+
+  public PartitionSpecWithSharedSD deepCopy() {
+    return new PartitionSpecWithSharedSD(this);
+  }
+
+  @Override
+  public void clear() {
+    this.partitions = null;
+    this.sd = null;
+  }
+
+  public int getPartitionsSize() {
+    return (this.partitions == null) ? 0 : this.partitions.size();
+  }
+
+  public java.util.Iterator<PartitionWithoutSD> getPartitionsIterator() {
+    return (this.partitions == null) ? null : this.partitions.iterator();
+  }
+
+  public void addToPartitions(PartitionWithoutSD elem) {
+    if (this.partitions == null) {
+      this.partitions = new ArrayList<PartitionWithoutSD>();
+    }
+    this.partitions.add(elem);
+  }
+
+  public List<PartitionWithoutSD> getPartitions() {
+    return this.partitions;
+  }
+
+  public void setPartitions(List<PartitionWithoutSD> partitions) {
+    this.partitions = partitions;
+  }
+
+  public void unsetPartitions() {
+    this.partitions = null;
+  }
+
+  /** Returns true if field partitions is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitions() {
+    return this.partitions != null;
+  }
+
+  public void setPartitionsIsSet(boolean value) {
+    if (!value) {
+      this.partitions = null;
+    }
+  }
+
+  public StorageDescriptor getSd() {
+    return this.sd;
+  }
+
+  public void setSd(StorageDescriptor sd) {
+    this.sd = sd;
+  }
+
+  public void unsetSd() {
+    this.sd = null;
+  }
+
+  /** Returns true if field sd is set (has been assigned a value) and false otherwise */
+  public boolean isSetSd() {
+    return this.sd != null;
+  }
+
+  public void setSdIsSet(boolean value) {
+    if (!value) {
+      this.sd = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PARTITIONS:
+      if (value == null) {
+        unsetPartitions();
+      } else {
+        setPartitions((List<PartitionWithoutSD>)value);
+      }
+      break;
+
+    case SD:
+      if (value == null) {
+        unsetSd();
+      } else {
+        setSd((StorageDescriptor)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PARTITIONS:
+      return getPartitions();
+
+    case SD:
+      return getSd();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PARTITIONS:
+      return isSetPartitions();
+    case SD:
+      return isSetSd();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionSpecWithSharedSD)
+      return this.equals((PartitionSpecWithSharedSD)that);
+    return false;
+  }
+
+  public boolean equals(PartitionSpecWithSharedSD that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_partitions = true && this.isSetPartitions();
+    boolean that_present_partitions = true && that.isSetPartitions();
+    if (this_present_partitions || that_present_partitions) {
+      if (!(this_present_partitions && that_present_partitions))
+        return false;
+      if (!this.partitions.equals(that.partitions))
+        return false;
+    }
+
+    boolean this_present_sd = true && this.isSetSd();
+    boolean that_present_sd = true && that.isSetSd();
+    if (this_present_sd || that_present_sd) {
+      if (!(this_present_sd && that_present_sd))
+        return false;
+      if (!this.sd.equals(that.sd))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_partitions = true && (isSetPartitions());
+    list.add(present_partitions);
+    if (present_partitions)
+      list.add(partitions);
+
+    boolean present_sd = true && (isSetSd());
+    list.add(present_sd);
+    if (present_sd)
+      list.add(sd);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionSpecWithSharedSD other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitions()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSd()).compareTo(other.isSetSd());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSd()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sd, other.sd);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionSpecWithSharedSD(");
+    boolean first = true;
+
+    sb.append("partitions:");
+    if (this.partitions == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partitions);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("sd:");
+    if (this.sd == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.sd);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (sd != null) {
+      sd.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionSpecWithSharedSDStandardSchemeFactory implements SchemeFactory {
+    public PartitionSpecWithSharedSDStandardScheme getScheme() {
+      return new PartitionSpecWithSharedSDStandardScheme();
+    }
+  }
+
+  private static class PartitionSpecWithSharedSDStandardScheme extends StandardScheme<PartitionSpecWithSharedSD> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpecWithSharedSD struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PARTITIONS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list252 = iprot.readListBegin();
+                struct.partitions = new ArrayList<PartitionWithoutSD>(_list252.size);
+                PartitionWithoutSD _elem253;
+                for (int _i254 = 0; _i254 < _list252.size; ++_i254)
+                {
+                  _elem253 = new PartitionWithoutSD();
+                  _elem253.read(iprot);
+                  struct.partitions.add(_elem253);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // SD
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.sd = new StorageDescriptor();
+              struct.sd.read(iprot);
+              struct.setSdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpecWithSharedSD struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.partitions != null) {
+        oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
+          for (PartitionWithoutSD _iter255 : struct.partitions)
+          {
+            _iter255.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.sd != null) {
+        oprot.writeFieldBegin(SD_FIELD_DESC);
+        struct.sd.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionSpecWithSharedSDTupleSchemeFactory implements SchemeFactory {
+    public PartitionSpecWithSharedSDTupleScheme getScheme() {
+      return new PartitionSpecWithSharedSDTupleScheme();
+    }
+  }
+
+  private static class PartitionSpecWithSharedSDTupleScheme extends TupleScheme<PartitionSpecWithSharedSD> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpecWithSharedSD struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetPartitions()) {
+        optionals.set(0);
+      }
+      if (struct.isSetSd()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetPartitions()) {
+        {
+          oprot.writeI32(struct.partitions.size());
+          for (PartitionWithoutSD _iter256 : struct.partitions)
+          {
+            _iter256.write(oprot);
+          }
+        }
+      }
+      if (struct.isSetSd()) {
+        struct.sd.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpecWithSharedSD struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list257 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.partitions = new ArrayList<PartitionWithoutSD>(_list257.size);
+          PartitionWithoutSD _elem258;
+          for (int _i259 = 0; _i259 < _list257.size; ++_i259)
+          {
+            _elem258 = new PartitionWithoutSD();
+            _elem258.read(iprot);
+            struct.partitions.add(_elem258);
+          }
+        }
+        struct.setPartitionsIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.sd = new StorageDescriptor();
+        struct.sd.read(iprot);
+        struct.setSdIsSet(true);
+      }
+    }
+  }
+
+}
+


[69/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 0000000,8721022..aa29dd9
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@@ -1,0 -1,12219 +1,12377 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.commons.lang.StringUtils.join;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+ 
+ import java.io.IOException;
+ import java.lang.reflect.Field;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.nio.ByteBuffer;
+ import java.sql.Connection;
+ import java.sql.SQLException;
+ import java.sql.SQLIntegrityConstraintViolationException;
+ import java.sql.Statement;
+ import java.time.LocalDateTime;
+ import java.time.format.DateTimeFormatter;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.LinkedList;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.Properties;
+ import java.util.Set;
+ import java.util.TreeSet;
+ import java.util.UUID;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicBoolean;
+ import java.util.concurrent.locks.Lock;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.regex.Pattern;
+ 
+ import javax.jdo.JDOCanRetryException;
+ import javax.jdo.JDODataStoreException;
+ import javax.jdo.JDOException;
+ import javax.jdo.JDOHelper;
+ import javax.jdo.JDOObjectNotFoundException;
+ import javax.jdo.PersistenceManager;
+ import javax.jdo.PersistenceManagerFactory;
+ import javax.jdo.Query;
+ import javax.jdo.Transaction;
+ import javax.jdo.datastore.DataStoreCache;
+ import javax.jdo.datastore.JDOConnection;
+ import javax.jdo.identity.IntIdentity;
+ import javax.sql.DataSource;
+ 
+ import com.google.common.base.Strings;
+ 
+ import org.apache.commons.collections.CollectionUtils;
+ import org.apache.commons.lang.ArrayUtils;
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.commons.lang.exception.ExceptionUtils;
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.Path;
 -import org.apache.hadoop.hive.common.DatabaseName;
 -import org.apache.hadoop.hive.common.StatsSetupConst;
 -import org.apache.hadoop.hive.common.TableName;
++import org.apache.hadoop.hive.common.*;
+ import org.apache.hadoop.hive.metastore.MetaStoreDirectSql.SqlFilterForPushdown;
 -import org.apache.hadoop.hive.metastore.api.AggrStats;
 -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 -import org.apache.hadoop.hive.metastore.api.Database;
 -import org.apache.hadoop.hive.metastore.api.FieldSchema;
 -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 -import org.apache.hadoop.hive.metastore.api.Function;
 -import org.apache.hadoop.hive.metastore.api.FunctionType;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectType;
 -import org.apache.hadoop.hive.metastore.api.ISchema;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 -import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 -import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 -import org.apache.hadoop.hive.metastore.api.MetaException;
 -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 -import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 -import org.apache.hadoop.hive.metastore.api.Order;
 -import org.apache.hadoop.hive.metastore.api.Partition;
 -import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesRow;
 -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 -import org.apache.hadoop.hive.metastore.api.PrincipalType;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
 -import org.apache.hadoop.hive.metastore.api.ResourceType;
 -import org.apache.hadoop.hive.metastore.api.ResourceUri;
 -import org.apache.hadoop.hive.metastore.api.Role;
 -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 -import org.apache.hadoop.hive.metastore.api.RuntimeStat;
 -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 -import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;
 -import org.apache.hadoop.hive.metastore.api.SchemaType;
 -import org.apache.hadoop.hive.metastore.api.SchemaValidation;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionState;
 -import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 -import org.apache.hadoop.hive.metastore.api.SerdeType;
 -import org.apache.hadoop.hive.metastore.api.SkewedInfo;
 -import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableMeta;
 -import org.apache.hadoop.hive.metastore.api.Type;
 -import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 -import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMMapping;
 -import org.apache.hadoop.hive.metastore.api.WMNullablePool;
 -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMPool;
 -import org.apache.hadoop.hive.metastore.api.WMPoolTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlanStatus;
 -import org.apache.hadoop.hive.metastore.api.WMTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
 -import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
++import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
 -import org.apache.hadoop.hive.metastore.model.MCatalog;
 -import org.apache.hadoop.hive.metastore.model.MColumnDescriptor;
 -import org.apache.hadoop.hive.metastore.model.MConstraint;
 -import org.apache.hadoop.hive.metastore.model.MCreationMetadata;
 -import org.apache.hadoop.hive.metastore.model.MDBPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MDatabase;
 -import org.apache.hadoop.hive.metastore.model.MDelegationToken;
 -import org.apache.hadoop.hive.metastore.model.MFieldSchema;
 -import org.apache.hadoop.hive.metastore.model.MFunction;
 -import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MISchema;
 -import org.apache.hadoop.hive.metastore.model.MMasterKey;
 -import org.apache.hadoop.hive.metastore.model.MMetastoreDBProperties;
 -import org.apache.hadoop.hive.metastore.model.MNotificationLog;
 -import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
 -import org.apache.hadoop.hive.metastore.model.MOrder;
 -import org.apache.hadoop.hive.metastore.model.MPartition;
 -import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics;
 -import org.apache.hadoop.hive.metastore.model.MPartitionEvent;
 -import org.apache.hadoop.hive.metastore.model.MPartitionPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MResourceUri;
 -import org.apache.hadoop.hive.metastore.model.MRole;
 -import org.apache.hadoop.hive.metastore.model.MRoleMap;
 -import org.apache.hadoop.hive.metastore.model.MRuntimeStat;
 -import org.apache.hadoop.hive.metastore.model.MSchemaVersion;
 -import org.apache.hadoop.hive.metastore.model.MSerDeInfo;
 -import org.apache.hadoop.hive.metastore.model.MStorageDescriptor;
 -import org.apache.hadoop.hive.metastore.model.MStringList;
 -import org.apache.hadoop.hive.metastore.model.MTable;
 -import org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege;
 -import org.apache.hadoop.hive.metastore.model.MTableColumnStatistics;
 -import org.apache.hadoop.hive.metastore.model.MTablePrivilege;
 -import org.apache.hadoop.hive.metastore.model.MType;
 -import org.apache.hadoop.hive.metastore.model.MVersionTable;
 -import org.apache.hadoop.hive.metastore.model.MWMMapping;
++import org.apache.hadoop.hive.metastore.model.*;
+ import org.apache.hadoop.hive.metastore.model.MWMMapping.EntityType;
 -import org.apache.hadoop.hive.metastore.model.MWMPool;
 -import org.apache.hadoop.hive.metastore.model.MWMResourcePlan;
+ import org.apache.hadoop.hive.metastore.model.MWMResourcePlan.Status;
 -import org.apache.hadoop.hive.metastore.model.MWMTrigger;
 -import org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
+ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
++import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;
++import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
++import org.apache.hive.common.util.TxnIdUtils;
+ import org.apache.thrift.TException;
+ import org.datanucleus.AbstractNucleusContext;
+ import org.datanucleus.ClassLoaderResolver;
+ import org.datanucleus.ClassLoaderResolverImpl;
+ import org.datanucleus.NucleusContext;
+ import org.datanucleus.PropertyNames;
+ import org.datanucleus.api.jdo.JDOPersistenceManager;
+ import org.datanucleus.api.jdo.JDOPersistenceManagerFactory;
+ import org.datanucleus.store.rdbms.exceptions.MissingTableException;
+ import org.datanucleus.store.scostore.Store;
+ import org.datanucleus.util.WeakValueMap;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.codahale.metrics.Counter;
+ import com.codahale.metrics.MetricRegistry;
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.base.Preconditions;
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Maps;
+ import com.google.common.collect.Sets;
+ 
+ 
+ /**
+  * This class is the interface between the application logic and the database
+  * store that contains the objects. Refrain from putting any logic in model.M*
+  * objects or in this file, as the former could be auto-generated and this class
+  * would need to be made into an interface that can read both from a database
+  * and a filestore.
+  */
+ public class ObjectStore implements RawStore, Configurable {
+   private static Properties prop = null;
+   private static PersistenceManagerFactory pmf = null;
+   private static boolean forTwoMetastoreTesting = false;
+   private int batchSize = Batchable.NO_BATCHING;
+ 
+   private static final DateTimeFormatter YMDHMS_FORMAT = DateTimeFormatter.ofPattern(
+       "yyyy_MM_dd_HH_mm_ss");
+ 
+   private static Lock pmfPropLock = new ReentrantLock();
+   /**
+    * Verify the schema only once per JVM since the db connection info is static.
+    */
+   private final static AtomicBoolean isSchemaVerified = new AtomicBoolean(false);
+   private static final Logger LOG = LoggerFactory.getLogger(ObjectStore.class);
+ 
+   private enum TXN_STATUS {
+     NO_STATE, OPEN, COMMITED, ROLLBACK
+   }
+ 
+   private static final Map<String, Class<?>> PINCLASSMAP;
+   private static final String HOSTNAME;
+   private static final String USER;
+   private static final String JDO_PARAM = ":param";
+   static {
+     Map<String, Class<?>> map = new HashMap<>();
+     map.put("table", MTable.class);
+     map.put("storagedescriptor", MStorageDescriptor.class);
+     map.put("serdeinfo", MSerDeInfo.class);
+     map.put("partition", MPartition.class);
+     map.put("database", MDatabase.class);
+     map.put("type", MType.class);
+     map.put("fieldschema", MFieldSchema.class);
+     map.put("order", MOrder.class);
+     PINCLASSMAP = Collections.unmodifiableMap(map);
+     String hostname = "UNKNOWN";
+     try {
+       InetAddress clientAddr = InetAddress.getLocalHost();
+       hostname = clientAddr.getHostAddress();
+     } catch (IOException e) {
+     }
+     HOSTNAME = hostname;
+     String user = System.getenv("USER");
+     USER = org.apache.commons.lang.StringUtils.defaultString(user, "UNKNOWN");
+   }
+ 
+ 
+   private boolean isInitialized = false;
+   private PersistenceManager pm = null;
+   private SQLGenerator sqlGenerator = null;
+   private MetaStoreDirectSql directSql = null;
+   private DatabaseProduct dbType = null;
+   private PartitionExpressionProxy expressionProxy = null;
+   private Configuration conf;
+   private volatile int openTrasactionCalls = 0;
+   private Transaction currentTransaction = null;
+   private TXN_STATUS transactionStatus = TXN_STATUS.NO_STATE;
+   private Pattern partitionValidationPattern;
+   private Counter directSqlErrors;
+ 
+   /**
+    * An AutoCloseable wrapper around the Query class that passes the Query object to the caller and lets
+    * the caller release the resources when the QueryWrapper goes out of scope.
+    */
+   public static class QueryWrapper implements AutoCloseable {
+     public Query query;
+ 
+     /**
+      * Explicitly closes the query object to release the resources
+      */
+     @Override
+     public void close() {
+       if (query != null) {
+         query.closeAll();
+         query = null;
+       }
+     }
+   }
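
As a usage sketch (illustrative only; pm and the MTable filter are placeholders, not part of this class's contract), callers pair QueryWrapper with try-with-resources so the underlying JDO query is closed when the block exits:

    // Hypothetical caller: close() runs on block exit and calls query.closeAll().
    try (QueryWrapper wrapper = new QueryWrapper()) {
      wrapper.query = pm.newQuery(MTable.class, "tableName == name");
      wrapper.query.declareParameters("java.lang.String name");
      Collection<MTable> tables = (Collection<MTable>) wrapper.query.execute("some_table");
      // consume 'tables' here, before the wrapper releases the query
    }
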
+ 
+   public ObjectStore() {
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return conf;
+   }
+ 
+   /**
+    * Called whenever this object is instantiated using ReflectionUtils, and also
+    * on connection retries. In cases of connection retries, conf will usually
+    * contain modified values.
+    */
+   @Override
+   @SuppressWarnings("nls")
+   public void setConf(Configuration conf) {
+     // Although an instance of ObjectStore is accessed by one thread, there may
+     // be many threads with ObjectStore instances. So the static variables
+     // pmf and prop need to be protected with locks.
+     pmfPropLock.lock();
+     try {
+       isInitialized = false;
+       this.conf = conf;
+       configureSSL(conf);
+       Properties propsFromConf = getDataSourceProps(conf);
+       boolean propsChanged = !propsFromConf.equals(prop);
+ 
+       if (propsChanged) {
+         if (pmf != null){
+           clearOutPmfClassLoaderCache(pmf);
+           if (!forTwoMetastoreTesting) {
+             // close the underlying connection pool to avoid leaks
+             pmf.close();
+           }
+         }
+         pmf = null;
+         prop = null;
+       }
+ 
+       assert(!isActiveTransaction());
+       shutdown();
+       // Always re-create pm, as we don't know whether it was created by the
+       // most recent instance of the pmf.
+       pm = null;
+       directSql = null;
+       expressionProxy = null;
+       openTrasactionCalls = 0;
+       currentTransaction = null;
+       transactionStatus = TXN_STATUS.NO_STATE;
+ 
+       initialize(propsFromConf);
+ 
+       String partitionValidationRegex =
+           MetastoreConf.getVar(this.conf, ConfVars.PARTITION_NAME_WHITELIST_PATTERN);
+       if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) {
+         partitionValidationPattern = Pattern.compile(partitionValidationRegex);
+       } else {
+         partitionValidationPattern = null;
+       }
+ 
+       // Note, if metrics have not been initialized this will return null, which means we aren't
+       // using metrics.  Thus we should always check whether this is non-null before using it.
+       MetricRegistry registry = Metrics.getRegistry();
+       if (registry != null) {
+         directSqlErrors = Metrics.getOrCreateCounter(MetricsConstants.DIRECTSQL_ERRORS);
+       }
+ 
+       this.batchSize = MetastoreConf.getIntVar(conf, ConfVars.RAWSTORE_PARTITION_BATCH_SIZE);
+ 
+       if (!isInitialized) {
+         throw new RuntimeException(
+         "Unable to create persistence manager. Check dss.log for details");
+       } else {
+         LOG.debug("Initialized ObjectStore");
+       }
+     } finally {
+       pmfPropLock.unlock();
+     }
+   }
+ 
+   private ClassLoader classLoader;
+   {
+     classLoader = Thread.currentThread().getContextClassLoader();
+     if (classLoader == null) {
+       classLoader = ObjectStore.class.getClassLoader();
+     }
+   }
+ 
+   @SuppressWarnings("nls")
+   private void initialize(Properties dsProps) {
+     int retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS);
+     long retryInterval = MetastoreConf.getTimeVar(conf,
+         ConfVars.HMS_HANDLER_INTERVAL, TimeUnit.MILLISECONDS);
+     int numTries = retryLimit;
+ 
+     while (numTries > 0){
+       try {
+         initializeHelper(dsProps);
+         return; // If we reach here, we succeed.
+       } catch (Exception e){
+         numTries--;
+         boolean retriable = isRetriableException(e);
+         if ((numTries > 0) && retriable){
+           LOG.info("Retriable exception while instantiating ObjectStore, retrying. " +
+               "{} tries left", numTries, e);
+           try {
+             Thread.sleep(retryInterval);
+           } catch (InterruptedException ie) {
+             // Restore the interrupted status, since we do not want to swallow it.
+             LOG.debug("Interrupted while sleeping before retrying.", ie);
+             Thread.currentThread().interrupt();
+           }
+           // If we're here, we'll proceed down the next while loop iteration.
+         } else {
+           // we've reached our limit, throw the last one.
+           if (retriable){
+             LOG.warn("Exception retry limit reached, not retrying any longer.",
+               e);
+           } else {
+             LOG.debug("Non-retriable exception during ObjectStore initialize.", e);
+           }
+           throw e;
+         }
+       }
+     }
+   }
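
An illustrative configuration for the retry loop above (the key names are my reading of the ConfVars referenced in the code and should be checked against MetastoreConf; the values are placeholders):

    metastore.hmshandler.retry.attempts=10
    metastore.hmshandler.retry.interval=2000ms
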
+ 
+   private static final Set<Class<? extends Throwable>> retriableExceptionClasses =
+       new HashSet<>(Arrays.asList(JDOCanRetryException.class));
+   /**
+    * Helper function for initialize to determine if we should retry an exception.
+    * We return true if the exception is of a known retriable type, or if any exception
+    * in its recursive getCause() chain is of a known retriable type.
+    */
+   private boolean isRetriableException(Throwable e) {
+     if (e == null){
+       return false;
+     }
+     if (retriableExceptionClasses.contains(e.getClass())){
+       return true;
+     }
+     for (Class<? extends Throwable> c : retriableExceptionClasses){
+       if (c.isInstance(e)){
+         return true;
+       }
+     }
+ 
+     if (e.getCause() == null){
+       return false;
+     }
+     return isRetriableException(e.getCause());
+   }
+ 
+   /**
+    * Private helper to do the initialization routine, so we can retry if it fails.
+    * @param dsProps
+    */
+   private void initializeHelper(Properties dsProps) {
+     LOG.debug("ObjectStore, initialize called");
+     prop = dsProps;
+     pm = getPersistenceManager();
+     try {
+       String productName = MetaStoreDirectSql.getProductName(pm);
+       sqlGenerator = new SQLGenerator(DatabaseProduct.determineDatabaseProduct(productName), conf);
+     } catch (SQLException e) {
+       LOG.error("error trying to figure out the database product", e);
+       throw new RuntimeException(e);
+     }
+     isInitialized = pm != null;
+     if (isInitialized) {
+       dbType = determineDatabaseProduct();
+       expressionProxy = createExpressionProxy(conf);
+       if (MetastoreConf.getBoolVar(getConf(), ConfVars.TRY_DIRECT_SQL)) {
+         String schema = prop.getProperty("javax.jdo.mapping.Schema");
+         schema = org.apache.commons.lang.StringUtils.defaultIfBlank(schema, null);
+         directSql = new MetaStoreDirectSql(pm, conf, schema);
+       }
+     }
+     LOG.debug("RawStore: {}, with PersistenceManager: {}" +
+         " created in the thread with id: {}", this, pm, Thread.currentThread().getId());
+   }
+ 
+   private DatabaseProduct determineDatabaseProduct() {
+     try {
+       return DatabaseProduct.determineDatabaseProduct(getProductName(pm));
+     } catch (SQLException e) {
+       LOG.warn("Cannot determine database product; assuming OTHER", e);
+       return DatabaseProduct.OTHER;
+     }
+   }
+ 
+   private static String getProductName(PersistenceManager pm) {
+     JDOConnection jdoConn = pm.getDataStoreConnection();
+     try {
+       return ((Connection)jdoConn.getNativeConnection()).getMetaData().getDatabaseProductName();
+     } catch (Throwable t) {
+       LOG.warn("Error retrieving product name", t);
+       return null;
+     } finally {
+       jdoConn.close(); // We must release the connection before we call other pm methods.
+     }
+   }
+ 
+   /**
+    * Creates the proxy used to evaluate expressions. This is here to prevent circular
+    * dependency - ql -&gt; metastore client &lt;-&gt; metastore server -&gt; ql. If server and
+    * client are split, this can be removed.
+    * @param conf Configuration.
+    * @return The partition expression proxy.
+    */
+   private static PartitionExpressionProxy createExpressionProxy(Configuration conf) {
+     String className = MetastoreConf.getVar(conf, ConfVars.EXPRESSION_PROXY_CLASS);
+     try {
+       Class<? extends PartitionExpressionProxy> clazz =
+            JavaUtils.getClass(className, PartitionExpressionProxy.class);
+       return JavaUtils.newInstance(clazz, new Class<?>[0], new Object[0]);
+     } catch (MetaException e) {
+       LOG.error("Error loading PartitionExpressionProxy", e);
+       throw new RuntimeException("Error loading PartitionExpressionProxy: " + e.getMessage());
+     }
+   }
+ 
+   /**
+    * Configure the SSL properties of the connection from the provided config.
+    * @param conf
+    */
+   private static void configureSSL(Configuration conf) {
+     // SSL support
+     String sslPropString = MetastoreConf.getVar(conf, ConfVars.DBACCESS_SSL_PROPS);
+     if (org.apache.commons.lang.StringUtils.isNotEmpty(sslPropString)) {
+       LOG.info("Metastore setting SSL properties of the connection to backed DB");
+       for (String sslProp : sslPropString.split(",")) {
+         String[] pair = sslProp.trim().split("=");
+         if (pair != null && pair.length == 2) {
+           System.setProperty(pair[0].trim(), pair[1].trim());
+         } else {
+           LOG.warn("Invalid metastore property value for {}", ConfVars.DBACCESS_SSL_PROPS);
+         }
+       }
+     }
+   }
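
For illustration, DBACCESS_SSL_PROPS is parsed as a comma-separated list of key=value pairs, each trimmed and promoted to a JVM system property via System.setProperty. The javax.net.ssl names below are standard JSSE system properties, and the values are placeholders:

    javax.net.ssl.trustStore=/path/to/truststore.jks,javax.net.ssl.trustStorePassword=changeit
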
+ 
+   /**
+    * Properties specified in hive-default.xml override the properties specified
+    * in jpox.properties.
+    */
+   @SuppressWarnings("nls")
+   private static Properties getDataSourceProps(Configuration conf) {
+     Properties prop = new Properties();
+     correctAutoStartMechanism(conf);
+ 
+     // First, go through and set all our values for datanucleus and javax.jdo parameters.  This
+     // has to be a separate first step because we don't set the default values in the config object.
+     for (ConfVars var : MetastoreConf.dataNucleusAndJdoConfs) {
+       String confVal = MetastoreConf.getAsString(conf, var);
+       String varName = var.getVarname();
+       Object prevVal = prop.setProperty(varName, confVal);
+       if (MetastoreConf.isPrintable(varName)) {
+         LOG.debug("Overriding {} value {} from jpox.properties with {}",
+           varName, prevVal, confVal);
+       }
+     }
+ 
+     // Now, we need to look for any values that the user set that MetastoreConf doesn't know about.
+     // TODO Commenting this out for now, as it breaks because the conf values aren't getting properly
+     // interpolated in case of variables.  See HIVE-17788.
+     /*
+     for (Map.Entry<String, String> e : conf) {
+       if (e.getKey().startsWith("datanucleus.") || e.getKey().startsWith("javax.jdo.")) {
+         // We have to handle this differently depending on whether it is a value known to
+         // MetastoreConf or not.  If it is, we need to get the default value if a value isn't
+         // provided.  If not, we just set whatever the user has set.
+         Object prevVal = prop.setProperty(e.getKey(), e.getValue());
+         if (LOG.isDebugEnabled() && MetastoreConf.isPrintable(e.getKey())) {
+           LOG.debug("Overriding " + e.getKey() + " value " + prevVal
+               + " from  jpox.properties with " + e.getValue());
+         }
+       }
+     }
+     */
+ 
+     // Password may no longer be in the conf, use getPassword()
+     try {
+       String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
+       if (org.apache.commons.lang.StringUtils.isNotEmpty(passwd)) {
+         // We can get away with the use of varname here because varname == hiveName for PWD
+         prop.setProperty(ConfVars.PWD.getVarname(), passwd);
+       }
+     } catch (IOException err) {
+       throw new RuntimeException("Error getting metastore password: " + err.getMessage(), err);
+     }
+ 
+     if (LOG.isDebugEnabled()) {
+       for (Entry<Object, Object> e : prop.entrySet()) {
+         if (MetastoreConf.isPrintable(e.getKey().toString())) {
+           LOG.debug("{} = {}", e.getKey(), e.getValue());
+         }
+       }
+     }
+ 
+     return prop;
+   }
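
A sketch of the kind of entries that end up in the returned Properties (standard javax.jdo keys; the embedded-Derby URL is only an illustrative value, not a recommendation):

    javax.jdo.option.ConnectionURL=jdbc:derby:;databaseName=metastore_db;create=true
    javax.jdo.option.ConnectionDriverName=org.apache.derby.jdbc.EmbeddedDriver
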
+ 
+   /**
+    * Update conf to set datanucleus.autoStartMechanismMode=ignored.
+    * This is necessary to be able to use an older version of Hive against
+    * an upgraded but compatible metastore schema in the db from a newer
+    * version of Hive.
+    * @param conf
+    */
+   private static void correctAutoStartMechanism(Configuration conf) {
+     final String autoStartKey = "datanucleus.autoStartMechanismMode";
+     final String autoStartIgnore = "ignored";
+     String currentAutoStartVal = conf.get(autoStartKey);
+     if (!autoStartIgnore.equalsIgnoreCase(currentAutoStartVal)) {
+       LOG.warn("{} is set to unsupported value {} . Setting it to value: {}", autoStartKey,
+         conf.get(autoStartKey), autoStartIgnore);
+     }
+     conf.set(autoStartKey, autoStartIgnore);
+   }
+ 
+   private static synchronized PersistenceManagerFactory getPMF() {
+     if (pmf == null) {
+ 
+       Configuration conf = MetastoreConf.newMetastoreConf();
+       DataSourceProvider dsp = DataSourceProviderFactory.hasProviderSpecificConfigurations(conf) ?
+               DataSourceProviderFactory.getDataSourceProvider(conf) : null;
+ 
+       if (dsp == null) {
+         pmf = JDOHelper.getPersistenceManagerFactory(prop);
+       } else {
+         try {
+           DataSource ds = dsp.create(conf);
+           Map<Object, Object> dsProperties = new HashMap<>();
+           //Any preexisting datanucleus property should be passed along
+           dsProperties.putAll(prop);
+           dsProperties.put(PropertyNames.PROPERTY_CONNECTION_FACTORY, ds);
+           dsProperties.put(PropertyNames.PROPERTY_CONNECTION_FACTORY2, ds);
+           dsProperties.put("javax.jdo.PersistenceManagerFactoryClass",
+               "org.datanucleus.api.jdo.JDOPersistenceManagerFactory");
+           pmf = JDOHelper.getPersistenceManagerFactory(dsProperties);
+         } catch (SQLException e) {
+           LOG.warn("Could not create PersistenceManagerFactory using " +
+               "connection pool properties, will fall back", e);
+           pmf = JDOHelper.getPersistenceManagerFactory(prop);
+         }
+       }
+       DataStoreCache dsc = pmf.getDataStoreCache();
+       if (dsc != null) {
+         String objTypes = MetastoreConf.getVar(conf, ConfVars.CACHE_PINOBJTYPES);
+         LOG.info("Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes=\"{}\"", objTypes);
+         if (org.apache.commons.lang.StringUtils.isNotEmpty(objTypes)) {
+           String[] typeTokens = objTypes.toLowerCase().split(",");
+           for (String type : typeTokens) {
+             type = type.trim();
+             if (PINCLASSMAP.containsKey(type)) {
+               dsc.pinAll(true, PINCLASSMAP.get(type));
+             } else {
+               LOG.warn("{} is not one of the pinnable object types: {}", type,
+                 org.apache.commons.lang.StringUtils.join(PINCLASSMAP.keySet(), " "));
+             }
+           }
+         }
+       } else {
+         LOG.warn("PersistenceManagerFactory returned null DataStoreCache object. Unable to initialize object pin types defined by hive.metastore.cache.pinobjtypes");
+       }
+     }
+     return pmf;
+   }
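
For example (an illustrative setting), given the PINCLASSMAP keys above, a value like the following pins the corresponding model classes in the DataStoreCache; entries are trimmed and lower-cased before lookup, so case and surrounding spaces do not matter:

    hive.metastore.cache.pinobjtypes=Table, StorageDescriptor, SerDeInfo, Partition, Database
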
+ 
+   @InterfaceAudience.LimitedPrivate({"HCATALOG"})
+   @InterfaceStability.Evolving
+   public PersistenceManager getPersistenceManager() {
+     return getPMF().getPersistenceManager();
+   }
+ 
+   @Override
+   public void shutdown() {
+     LOG.debug("RawStore: {}, with PersistenceManager: {} will be shutdown", this, pm);
+     if (pm != null) {
+       pm.close();
+       pm = null;
+     }
+   }
+ 
+   /**
+    * Opens a new transaction, or reuses the one already created. Every call of
+    * this function must have a corresponding commit or rollback call.
+    *
+    * @return an active transaction
+    */
+   @Override
+   public boolean openTransaction() {
+     openTrasactionCalls++;
+     if (openTrasactionCalls == 1) {
+       currentTransaction = pm.currentTransaction();
+       currentTransaction.begin();
+       transactionStatus = TXN_STATUS.OPEN;
+     } else {
+       // openTransactionCalls > 1 means this is an interior transaction
+       // We should already have a transaction created that is active.
+       if ((currentTransaction == null) || (!currentTransaction.isActive())){
+         throw new RuntimeException("openTransaction called in an interior"
+             + " transaction scope, but currentTransaction is not active.");
+       }
+     }
+ 
+     boolean result = currentTransaction.isActive();
+     debugLog("Open transaction: count = " + openTrasactionCalls + ", isActive = " + result);
+     return result;
+   }
+ 
+   /**
+    * If this is the commit of the first open call, then an actual commit is
+    * performed.
+    *
+    * @return Always returns true
+    */
+   @Override
+   @SuppressWarnings("nls")
+   public boolean commitTransaction() {
+     if (TXN_STATUS.ROLLBACK == transactionStatus) {
+       debugLog("Commit transaction: rollback");
+       return false;
+     }
+     if (openTrasactionCalls <= 0) {
+       RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = "
+           + openTrasactionCalls + ". This probably indicates that there are unbalanced " +
+           "calls to openTransaction/commitTransaction");
+       LOG.error("Unbalanced calls to open/commit Transaction", e);
+       throw e;
+     }
+     if (!currentTransaction.isActive()) {
+       RuntimeException e = new RuntimeException("commitTransaction was called but openTransactionCalls = "
+           + openTrasactionCalls + ". This probably indicates that there are unbalanced " +
+           "calls to openTransaction/commitTransaction");
+       LOG.error("Unbalanced calls to open/commit Transaction", e);
+       throw e;
+     }
+     openTrasactionCalls--;
+     debugLog("Commit transaction: count = " + openTrasactionCalls + ", isactive "+ currentTransaction.isActive());
+ 
+     if ((openTrasactionCalls == 0) && currentTransaction.isActive()) {
+       transactionStatus = TXN_STATUS.COMMITED;
+       currentTransaction.commit();
+     }
+     return true;
+   }
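
A sketch of the nesting semantics (store stands for an ObjectStore instance; the comments reflect the counter logic above). Only the outermost commit performs the real JDO commit:

    store.openTransaction();   // count 1: begins the underlying JDO transaction
    store.openTransaction();   // count 2: interior scope, reuses the active transaction
    store.commitTransaction(); // count 1: bookkeeping only, nothing committed yet
    store.commitTransaction(); // count 0: the JDO transaction commits here
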
+ 
+   /**
+    * @return true if there is an active transaction. If the current transaction
+    *         is either committed or rolled back it returns false
+    */
+   @Override
+   public boolean isActiveTransaction() {
+     if (currentTransaction == null) {
+       return false;
+     }
+     return currentTransaction.isActive();
+   }
+ 
+   /**
+    * Rolls back the current transaction if it is active
+    */
+   @Override
+   public void rollbackTransaction() {
+     if (openTrasactionCalls < 1) {
+       debugLog("rolling back transaction: no open transactions: " + openTrasactionCalls);
+       return;
+     }
+     debugLog("Rollback transaction, isActive: " + currentTransaction.isActive());
+     try {
+       if (currentTransaction.isActive()
+           && transactionStatus != TXN_STATUS.ROLLBACK) {
+         currentTransaction.rollback();
+       }
+     } finally {
+       openTrasactionCalls = 0;
+       transactionStatus = TXN_STATUS.ROLLBACK;
+       // remove all detached objects from the cache, since the transaction is
+       // being rolled back they are no longer relevant, and this prevents them
+       // from reattaching in future transactions
+       pm.evictAll();
+     }
+   }
+ 
+   @Override
+   public void createCatalog(Catalog cat) throws MetaException {
+     LOG.debug("Creating catalog " + cat.getName());
+     boolean committed = false;
+     MCatalog mCat = catToMCat(cat);
+     try {
+       openTransaction();
+       pm.makePersistent(mCat);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @Override
+   public void alterCatalog(String catName, Catalog cat)
+       throws MetaException, InvalidOperationException {
+     if (!cat.getName().equals(catName)) {
+       throw new InvalidOperationException("You cannot change a catalog's name");
+     }
+     boolean committed = false;
+     try {
+       MCatalog mCat = getMCatalog(catName);
+       if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getLocationUri())) {
+         mCat.setLocationUri(cat.getLocationUri());
+       }
+       if (org.apache.commons.lang.StringUtils.isNotBlank(cat.getDescription())) {
+         mCat.setDescription(cat.getDescription());
+       }
+       openTransaction();
+       pm.makePersistent(mCat);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     LOG.debug("Fetching catalog " + catalogName);
+     MCatalog mCat = getMCatalog(catalogName);
+     if (mCat == null) {
+       throw new NoSuchObjectException("No catalog " + catalogName);
+     }
+     return mCatToCat(mCat);
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws MetaException {
+     LOG.debug("Fetching all catalog names");
+     boolean commited = false;
+     List<String> catalogs = null;
+ 
+     String queryStr = "select name from org.apache.hadoop.hive.metastore.model.MCatalog";
+     Query query = null;
+ 
+     openTransaction();
+     try {
+       query = pm.newQuery(queryStr);
+       query.setResult("name");
+       catalogs = new ArrayList<>((Collection<String>) query.execute());
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     Collections.sort(catalogs);
+     return catalogs;
+   }
+ 
+   @Override
+   public void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException {
+     LOG.debug("Dropping catalog " + catalogName);
+     boolean committed = false;
+     try {
+       openTransaction();
+       MCatalog mCat = getMCatalog(catalogName);
+       if (mCat == null) {
+         throw new NoSuchObjectException("No catalog " + catalogName);
+       }
+       pm.retrieve(mCat);
+       pm.deletePersistent(mCat);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   private MCatalog getMCatalog(String catalogName) throws MetaException {
+     boolean committed = false;
+     Query query = null;
+     try {
+       openTransaction();
+       catalogName = normalizeIdentifier(catalogName);
+       query = pm.newQuery(MCatalog.class, "name == catname");
+       query.declareParameters("java.lang.String catname");
+       query.setUnique(true);
+       MCatalog mCat = (MCatalog)query.execute(catalogName);
+       pm.retrieve(mCat);
+       committed = commitTransaction();
+       return mCat;
+     } finally {
+       rollbackAndCleanup(committed, query);
+     }
+   }
+ 
+   private MCatalog catToMCat(Catalog cat) {
+     MCatalog mCat = new MCatalog();
+     mCat.setName(normalizeIdentifier(cat.getName()));
+     if (cat.isSetDescription()) {
+       mCat.setDescription(cat.getDescription());
+     }
+     mCat.setLocationUri(cat.getLocationUri());
+     return mCat;
+   }
+ 
+   private Catalog mCatToCat(MCatalog mCat) {
+     Catalog cat = new Catalog(mCat.getName(), mCat.getLocationUri());
+     if (mCat.getDescription() != null) {
+       cat.setDescription(mCat.getDescription());
+     }
+     return cat;
+   }
+ 
+   @Override
+   public void createDatabase(Database db) throws InvalidObjectException, MetaException {
+     boolean commited = false;
+     MDatabase mdb = new MDatabase();
+     assert db.getCatalogName() != null;
+     mdb.setCatalogName(normalizeIdentifier(db.getCatalogName()));
+     assert mdb.getCatalogName() != null;
+     mdb.setName(db.getName().toLowerCase());
+     mdb.setLocationUri(db.getLocationUri());
+     mdb.setDescription(db.getDescription());
+     mdb.setParameters(db.getParameters());
+     mdb.setOwnerName(db.getOwnerName());
+     PrincipalType ownerType = db.getOwnerType();
+     mdb.setOwnerType((null == ownerType ? PrincipalType.USER.name() : ownerType.name()));
+     try {
+       openTransaction();
+       pm.makePersistent(mdb);
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @SuppressWarnings("nls")
+   private MDatabase getMDatabase(String catName, String name) throws NoSuchObjectException {
+     MDatabase mdb = null;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       name = normalizeIdentifier(name);
+       catName = normalizeIdentifier(catName);
+       query = pm.newQuery(MDatabase.class, "name == dbname && catalogName == catname");
+       query.declareParameters("java.lang.String dbname, java.lang.String catname");
+       query.setUnique(true);
+       mdb = (MDatabase) query.execute(name, catName);
+       pm.retrieve(mdb);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     if (mdb == null) {
+       throw new NoSuchObjectException("There is no database " + catName + "." + name);
+     }
+     return mdb;
+   }
+ 
+   @Override
+   public Database getDatabase(String catalogName, String name) throws NoSuchObjectException {
+     MetaException ex = null;
+     Database db = null;
+     try {
+       db = getDatabaseInternal(catalogName, name);
+     } catch (MetaException e) {
+       // The signature restricts us to NSOE, and NSOE being a flat exception prevents us
+       // from setting the MetaException as its cause. We should not lose the info
+       // we got here, but it's very likely that the MetaException is irrelevant and is
+       // actually an NSOE message, so we should log it and throw an NSOE with the msg.
+       ex = e;
+     }
+     if (db == null) {
+       LOG.warn("Failed to get database {}.{}, returning NoSuchObjectException",
+           catalogName, name, ex);
+       throw new NoSuchObjectException(name + (ex == null ? "" : (": " + ex.getMessage())));
+     }
+     return db;
+   }
+ 
+   public Database getDatabaseInternal(String catalogName, String name)
+       throws MetaException, NoSuchObjectException {
+     return new GetDbHelper(catalogName, name, true, true) {
+       @Override
+       protected Database getSqlResult(GetHelper<Database> ctx) throws MetaException {
+         return directSql.getDatabase(catalogName, dbName);
+       }
+ 
+       @Override
+       protected Database getJdoResult(GetHelper<Database> ctx) throws MetaException, NoSuchObjectException {
+         return getJDODatabase(catalogName, dbName);
+       }
+     }.run(false);
+    }
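
GetDbHelper extends the GetHelper machinery defined elsewhere in this class. Schematically (a simplification, not the actual implementation, which also manages transactions and metrics), run() tries the direct-SQL path first and falls back to JDO on failure:

    // Schematic only; names match the overrides above.
    Database result;
    try {
      result = getSqlResult(ctx);   // fast path via MetaStoreDirectSql
    } catch (Exception e) {
      directSqlErrors.inc();        // counted when metrics are enabled
      result = getJdoResult(ctx);   // fall back to DataNucleus/JDO
    }
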
+ 
+   public Database getJDODatabase(String catName, String name) throws NoSuchObjectException {
+     MDatabase mdb = null;
+     boolean commited = false;
+     try {
+       openTransaction();
+       mdb = getMDatabase(catName, name);
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+     Database db = new Database();
+     db.setName(mdb.getName());
+     db.setDescription(mdb.getDescription());
+     db.setLocationUri(mdb.getLocationUri());
+     db.setParameters(convertMap(mdb.getParameters()));
+     db.setOwnerName(mdb.getOwnerName());
+     String type = org.apache.commons.lang.StringUtils.defaultIfBlank(mdb.getOwnerType(), null);
+     PrincipalType principalType = (type == null) ? null : PrincipalType.valueOf(type);
+     db.setOwnerType(principalType);
+     db.setCatalogName(catName);
+     return db;
+   }
+ 
+   /**
+    * Alter the database object in metastore. Currently only the parameters
+    * of the database or the owner can be changed.
+    * @param catName the catalog name
+    * @param dbName the database name
+    * @param db the Hive Database object
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    */
+   @Override
+   public boolean alterDatabase(String catName, String dbName, Database db)
+     throws MetaException, NoSuchObjectException {
+ 
+     MDatabase mdb = null;
+     boolean committed = false;
+     try {
+       mdb = getMDatabase(catName, dbName);
+       mdb.setParameters(db.getParameters());
+       mdb.setOwnerName(db.getOwnerName());
+       if (db.getOwnerType() != null) {
+         mdb.setOwnerType(db.getOwnerType().name());
+       }
+       if (org.apache.commons.lang.StringUtils.isNotBlank(db.getDescription())) {
+         mdb.setDescription(db.getDescription());
+       }
+       if (org.apache.commons.lang.StringUtils.isNotBlank(db.getLocationUri())) {
+         mdb.setLocationUri(db.getLocationUri());
+       }
+       openTransaction();
+       pm.makePersistent(mdb);
+       committed = commitTransaction();
+     } finally {
+       if (!committed) {
+         rollbackTransaction();
+         return false;
+       }
+     }
+     return true;
+   }
+ 
+   @Override
+   public boolean dropDatabase(String catName, String dbname)
+       throws NoSuchObjectException, MetaException {
+     boolean success = false;
+     LOG.info("Dropping database {}.{} along with all tables", catName, dbname);
+     dbname = normalizeIdentifier(dbname);
+     catName = normalizeIdentifier(catName);
+     QueryWrapper queryWrapper = new QueryWrapper();
+     try {
+       openTransaction();
+ 
+       // then drop the database
+       MDatabase db = getMDatabase(catName, dbname);
+       pm.retrieve(db);
+       if (db != null) {
+         List<MDBPrivilege> dbGrants = this.listDatabaseGrants(catName, dbname, null, queryWrapper);
+         if (CollectionUtils.isNotEmpty(dbGrants)) {
+           pm.deletePersistentAll(dbGrants);
+         }
+         pm.deletePersistent(db);
+       }
+       success = commitTransaction();
+     } finally {
+       rollbackAndCleanup(success, queryWrapper);
+     }
+     return success;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String pattern) throws MetaException {
+     if (pattern == null || pattern.equals("*")) {
+       return getAllDatabases(catName);
+     }
+     boolean commited = false;
+     List<String> databases = null;
+     Query query = null;
+     try {
+       openTransaction();
+       // Take the pattern and split it on the | to get all the component
+       // patterns
+       String[] subpatterns = pattern.trim().split("\\|");
+       StringBuilder filterBuilder = new StringBuilder();
+       List<String> parameterVals = new ArrayList<>(subpatterns.length);
+       appendSimpleCondition(filterBuilder, "catalogName", new String[] {catName}, parameterVals);
+       appendPatternCondition(filterBuilder, "name", subpatterns, parameterVals);
+       query = pm.newQuery(MDatabase.class, filterBuilder.toString());
+       query.setResult("name");
+       query.setOrdering("name ascending");
+       Collection<String> names = (Collection<String>) query.executeWithArray(parameterVals.toArray(new String[0]));
+       databases = new ArrayList<>(names);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return databases;
+   }
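
An illustrative call (catalog and pattern values are placeholders): the pattern is split on '|' into subpatterns, each matched against database names in the given catalog, with '*' acting as a wildcard:

    List<String> dbs = store.getDatabases("hive", "sales_*|reporting");
    // matches e.g. "sales_2018" and "reporting", but not "marketing"
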
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws MetaException {
+     boolean commited = false;
+     List<String> databases = null;
+ 
+     Query query = null;
+     catName = normalizeIdentifier(catName);
+ 
+     openTransaction();
+     try {
+       query = pm.newQuery("select name from org.apache.hadoop.hive.metastore.model.MDatabase " +
+           "where catalogName == catname");
+       query.declareParameters("java.lang.String catname");
+       query.setResult("name");
+       databases = new ArrayList<>((Collection<String>) query.execute(catName));
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     Collections.sort(databases);
+     return databases;
+   }
+ 
+   private MType getMType(Type type) {
+     List<MFieldSchema> fields = new ArrayList<>();
+     if (type.getFields() != null) {
+       for (FieldSchema field : type.getFields()) {
+         fields.add(new MFieldSchema(field.getName(), field.getType(), field
+             .getComment()));
+       }
+     }
+     return new MType(type.getName(), type.getType1(), type.getType2(), fields);
+   }
+ 
+   private Type getType(MType mtype) {
+     List<FieldSchema> fields = new ArrayList<>();
+     if (mtype.getFields() != null) {
+       for (MFieldSchema field : mtype.getFields()) {
+         fields.add(new FieldSchema(field.getName(), field.getType(), field
+             .getComment()));
+       }
+     }
+     Type ret = new Type();
+     ret.setName(mtype.getName());
+     ret.setType1(mtype.getType1());
+     ret.setType2(mtype.getType2());
+     ret.setFields(fields);
+     return ret;
+   }
+ 
+   @Override
+   public boolean createType(Type type) {
+     boolean success = false;
+     MType mtype = getMType(type);
+     boolean commited = false;
+     try {
+       openTransaction();
+       pm.makePersistent(mtype);
+       commited = commitTransaction();
+       success = true;
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+     return success;
+   }
+ 
+   @Override
+   public Type getType(String typeName) {
+     Type type = null;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(MType.class, "name == typeName");
+       query.declareParameters("java.lang.String typeName");
+       query.setUnique(true);
+       MType mtype = (MType) query.execute(typeName.trim());
+       pm.retrieve(mtype); // retrieve the fetched object, not the still-null result holder
+       if (mtype != null) {
+         type = getType(mtype);
+       }
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return type;
+   }
+ 
+   @Override
+   public boolean dropType(String typeName) {
+     boolean success = false;
+     Query query = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(MType.class, "name == typeName");
+       query.declareParameters("java.lang.String typeName");
+       query.setUnique(true);
+       MType type = (MType) query.execute(typeName.trim());
+       pm.retrieve(type);
+       if (type != null) {
+         pm.deletePersistent(type);
+       }
+       success = commitTransaction();
+     } catch (JDOObjectNotFoundException e) {
+       success = commitTransaction();
+       LOG.debug("type not found {}", typeName, e);
+     } finally {
+       rollbackAndCleanup(success, query);
+     }
+     return success;
+   }
+ 
+   @Override
+   public List<String> createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints)
+     throws InvalidObjectException, MetaException {
+     boolean success = false;
+     try {
+       openTransaction();
+       createTable(tbl);
+       // Add constraints.
+       // We need not do a deep retrieval of the Table Column Descriptor while persisting the
+       // constraints since this transaction involving create table is not yet committed.
+       List<String> constraintNames = new ArrayList<>();
+       if (foreignKeys != null) {
+         constraintNames.addAll(addForeignKeys(foreignKeys, false, primaryKeys, uniqueConstraints));
+       }
+       if (primaryKeys != null) {
+         constraintNames.addAll(addPrimaryKeys(primaryKeys, false));
+       }
+       if (uniqueConstraints != null) {
+         constraintNames.addAll(addUniqueConstraints(uniqueConstraints, false));
+       }
+       if (notNullConstraints != null) {
+         constraintNames.addAll(addNotNullConstraints(notNullConstraints, false));
+       }
+       if (defaultConstraints != null) {
+         constraintNames.addAll(addDefaultConstraints(defaultConstraints, false));
+       }
+       if (checkConstraints != null) {
+         constraintNames.addAll(addCheckConstraints(checkConstraints, false));
+       }
+       success = commitTransaction();
+       return constraintNames;
+     } finally {
+       if (!success) {
+         rollbackTransaction();
+       }
+     }
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     boolean commited = false;
++    MTable mtbl = null;
++
+     try {
+       openTransaction();
+ 
 -      MTable mtbl = convertToMTable(tbl);
++      mtbl = convertToMTable(tbl);
++      if (TxnUtils.isTransactionalTable(tbl)) {
++        mtbl.setWriteId(tbl.getWriteId());
++      }
+       pm.makePersistent(mtbl);
+ 
+       if (tbl.getCreationMetadata() != null) {
+         MCreationMetadata mcm = convertToMCreationMetadata(tbl.getCreationMetadata());
+         pm.makePersistent(mcm);
+       }
+ 
+       PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges();
+       List<Object> toPersistPrivObjs = new ArrayList<>();
+       if (principalPrivs != null) {
+         int now = (int)(System.currentTimeMillis()/1000);
+ 
+         Map<String, List<PrivilegeGrantInfo>> userPrivs = principalPrivs.getUserPrivileges();
+         putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, userPrivs, PrincipalType.USER, "SQL");
+ 
+         Map<String, List<PrivilegeGrantInfo>> groupPrivs = principalPrivs.getGroupPrivileges();
+         putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, groupPrivs, PrincipalType.GROUP, "SQL");
+ 
+         Map<String, List<PrivilegeGrantInfo>> rolePrivs = principalPrivs.getRolePrivileges();
+         putPersistentPrivObjects(mtbl, toPersistPrivObjs, now, rolePrivs, PrincipalType.ROLE, "SQL");
+       }
+       pm.makePersistentAll(toPersistPrivObjs);
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       } else {
+         if (MetaStoreUtils.isMaterializedViewTable(tbl)) {
+           // Add to the invalidation cache
+           MaterializationsInvalidationCache.get().createMaterializedView(
+               tbl.getDbName(), tbl.getTableName(), tbl.getCreationMetadata().getTablesUsed(),
+               tbl.getCreationMetadata().getValidTxnList());
+         }
+       }
+     }
+   }
+ 
+   /**
+    * Convert PrivilegeGrantInfo from privMap to MTablePrivilege, and add all of
+    * them to the toPersistPrivObjs. These privilege objects will be persisted as
+    * part of createTable.
+    *
+    * @param mtbl
+    * @param toPersistPrivObjs
+    * @param now
+    * @param privMap
+    * @param type
+    * @param authorizer
+    */
+   private void putPersistentPrivObjects(MTable mtbl, List<Object> toPersistPrivObjs,
+       int now, Map<String, List<PrivilegeGrantInfo>> privMap, PrincipalType type, String authorizer) {
+     if (privMap != null) {
+       for (Map.Entry<String, List<PrivilegeGrantInfo>> entry : privMap
+           .entrySet()) {
+         String principalName = entry.getKey();
+         List<PrivilegeGrantInfo> privs = entry.getValue();
+         for (int i = 0; i < privs.size(); i++) {
+           PrivilegeGrantInfo priv = privs.get(i);
+           if (priv == null) {
+             continue;
+           }
+           MTablePrivilege mTblSec = new MTablePrivilege(
+               principalName, type.toString(), mtbl, priv.getPrivilege(),
+               now, priv.getGrantor(), priv.getGrantorType().toString(), priv
+                   .isGrantOption(), authorizer);
+           toPersistPrivObjs.add(mTblSec);
+         }
+       }
+     }
+   }
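  // Worked example (values invented for illustration): one PrivilegeGrantInfo per
  // principal becomes one MTablePrivilege row, all persisted together by createTable.
  //
  //   Map<String, List<PrivilegeGrantInfo>> userPrivs = new HashMap<>();
  //   userPrivs.put("alice", Arrays.asList(
  //       new PrivilegeGrantInfo("SELECT", now, "admin", PrincipalType.USER, true)));
  //   putPersistentPrivObjects(mtbl, toPersist, now, userPrivs, PrincipalType.USER, "SQL");
  //   // toPersist now holds MTablePrivilege("alice", "USER", mtbl, "SELECT", now,
  //   //                                     "admin", "USER", true, "SQL")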
+ 
+   @Override
+   public boolean dropTable(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean materializedView = false;
+     boolean success = false;
+     try {
+       openTransaction();
+       MTable tbl = getMTable(catName, dbName, tableName);
+       pm.retrieve(tbl);
+       if (tbl != null) {
+         materializedView = TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType());
+         // first remove all the grants
+         List<MTablePrivilege> tabGrants = listAllTableGrants(catName, dbName, tableName);
+         if (CollectionUtils.isNotEmpty(tabGrants)) {
+           pm.deletePersistentAll(tabGrants);
+         }
+         List<MTableColumnPrivilege> tblColGrants = listTableAllColumnGrants(catName, dbName,
+             tableName);
+         if (CollectionUtils.isNotEmpty(tblColGrants)) {
+           pm.deletePersistentAll(tblColGrants);
+         }
+ 
+         List<MPartitionPrivilege> partGrants = this.listTableAllPartitionGrants(catName, dbName, tableName);
+         if (CollectionUtils.isNotEmpty(partGrants)) {
+           pm.deletePersistentAll(partGrants);
+         }
+ 
+         List<MPartitionColumnPrivilege> partColGrants = listTableAllPartitionColumnGrants(catName, dbName,
+             tableName);
+         if (CollectionUtils.isNotEmpty(partColGrants)) {
+           pm.deletePersistentAll(partColGrants);
+         }
+         // delete column statistics if present
+         try {
+           deleteTableColumnStatistics(catName, dbName, tableName, null);
+         } catch (NoSuchObjectException e) {
+           LOG.info("Found no table level column statistics associated with {} to delete",
+               TableName.getQualified(catName, dbName, tableName));
+         }
+ 
++        // TODO## remove? This conversion appears to be unused.
++        Table table = convertToTable(tbl);
++
+         List<MConstraint> tabConstraints = listAllTableConstraintsWithOptionalConstraintName(
+                                            catName, dbName, tableName, null);
+         if (CollectionUtils.isNotEmpty(tabConstraints)) {
+           pm.deletePersistentAll(tabConstraints);
+         }
+ 
+         preDropStorageDescriptor(tbl.getSd());
+ 
+         if (materializedView) {
+           dropCreationMetadata(tbl.getDatabase().getCatalogName(),
+               tbl.getDatabase().getName(), tbl.getTableName());
+         }
+ 
+         // then remove the table
+         pm.deletePersistentAll(tbl);
+       }
+       success = commitTransaction();
+     } finally {
+       if (!success) {
+         rollbackTransaction();
+       } else {
+         if (materializedView) {
+           MaterializationsInvalidationCache.get().dropMaterializedView(dbName, tableName);
+         }
+       }
+     }
+     return success;
+   }
+ 
+   private boolean dropCreationMetadata(String catName, String dbName, String tableName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, InvalidInputException {
+     boolean success = false;
+     dbName = normalizeIdentifier(dbName);
+     tableName = normalizeIdentifier(tableName);
+     try {
+       openTransaction();
+       MCreationMetadata mcm = getCreationMetadata(catName, dbName, tableName);
+       pm.retrieve(mcm);
+       if (mcm != null) {
+         pm.deletePersistentAll(mcm);
+       }
+       success = commitTransaction();
+     } finally {
+       if (!success) {
+         rollbackTransaction();
+       }
+     }
+     return success;
+   }
+ 
+   private List<MConstraint> listAllTableConstraintsWithOptionalConstraintName(
+       String catName, String dbName, String tableName, String constraintname) {
+     catName = normalizeIdentifier(catName);
+     dbName = normalizeIdentifier(dbName);
+     tableName = normalizeIdentifier(tableName);
+     constraintname = constraintname != null ? normalizeIdentifier(constraintname) : null;
+     List<MConstraint> mConstraints = null;
+     List<String> constraintNames = new ArrayList<>();
+     Query query = null;
+ 
+     try {
+       query = pm.newQuery("select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint  where "
+         + "((parentTable.tableName == ptblname && parentTable.database.name == pdbname && " +
+               "parentTable.database.catalogName == pcatname) || "
+         + "(childTable != null && childTable.tableName == ctblname &&" +
+               "childTable.database.name == cdbname && childTable.database.catalogName == ccatname)) " +
+           (constraintname != null ? " && constraintName == constraintname" : ""));
+       query.declareParameters("java.lang.String ptblname, java.lang.String pdbname,"
+           + "java.lang.String pcatname, java.lang.String ctblname, java.lang.String cdbname," +
+           "java.lang.String ccatname" +
+         (constraintname != null ? ", java.lang.String constraintname" : ""));
+       Collection<?> constraintNamesColl =
+         constraintname != null ?
+           ((Collection<?>) query.
+             executeWithArray(tableName, dbName, catName, tableName, dbName, catName, constraintname)):
+           ((Collection<?>) query.
+             executeWithArray(tableName, dbName, catName, tableName, dbName, catName));
+       for (Iterator<?> i = constraintNamesColl.iterator(); i.hasNext();) {
+         String currName = (String) i.next();
+         constraintNames.add(currName);
+       }
+       query = pm.newQuery(MConstraint.class);
+       query.setFilter("param.contains(constraintName)");
+       query.declareParameters("java.util.Collection param");
+       Collection<?> constraints = (Collection<?>)query.execute(constraintNames);
+       mConstraints = new ArrayList<>();
+       for (Iterator<?> i = constraints.iterator(); i.hasNext();) {
+         MConstraint currConstraint = (MConstraint) i.next();
+         mConstraints.add(currConstraint);
+       }
+     } finally {
+       if (query != null) {
+         query.closeAll();
+       }
+     }
+     return mConstraints;
+   }
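  // Note on the two-phase lookup above: the first JDOQL query projects only the
  // matching constraint names (where the table appears as parent or child), and a
  // second query then fetches the MConstraint objects by those names. A hedged
  // sketch of what the parent-side match resolves to, assuming table "t1" in
  // database "db1" of catalog "hive":
  //
  //   select constraintName from org.apache.hadoop.hive.metastore.model.MConstraint
  //   where (parentTable.tableName == 't1' && parentTable.database.name == 'db1'
  //          && parentTable.database.catalogName == 'hive') || ...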
+ 
++  private static String getFullyQualifiedTableName(String dbName, String tblName) {
++    // Quote each identifier; the dot separator must sit outside the quotes.
++    return ((dbName == null || dbName.isEmpty()) ? "" : "\"" + dbName + "\".")
++        + "\"" + tblName + "\"";
++  }
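  // Usage sketch: with the quoting above, getFullyQualifiedTableName("db1", "t1")
  // returns "db1"."t1" (each identifier double-quoted for direct SQL), while a
  // null or empty dbName yields just "t1".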
++
+   @Override
 -  public Table getTable(String catName, String dbName, String tableName) throws MetaException {
++  public Table getTable(String catName, String dbName, String tableName)
++      throws MetaException {
++    return getTable(catName, dbName, tableName, -1, null);
++  }
++
++  @Override
++  public Table getTable(String catName, String dbName, String tableName,
++                        long txnId, String writeIdList)
++      throws MetaException {
+     boolean commited = false;
+     Table tbl = null;
+     try {
+       openTransaction();
 -      tbl = convertToTable(getMTable(catName, dbName, tableName));
++      MTable mtable = getMTable(catName, dbName, tableName);
++      tbl = convertToTable(mtable);
+       // Retrieve creation metadata if needed
+       if (tbl != null && TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
+         tbl.setCreationMetadata(
 -            convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
++                convertToCreationMetadata(getCreationMetadata(catName, dbName, tableName)));
++      }
++
++      // If this is a transactional non-partitioned table, check whether the
++      // current version of the table statistics in the metastore complies with
++      // the client query's snapshot isolation.
++      // Note: a partitioned table keeps its table stats and table snapshot in MPartition.
++      if (writeIdList != null) {
++        if (tbl != null
++            && TxnUtils.isTransactionalTable(tbl)
++            && tbl.getPartitionKeysSize() == 0) {
++          if (isCurrentStatsValidForTheQuery(mtable, txnId, writeIdList)) {
++            tbl.setIsStatsCompliant(true);
++          } else {
++            tbl.setIsStatsCompliant(false);
++            // Do not persist the following state, since it is query-specific (not global).
++            StatsSetupConst.setBasicStatsState(tbl.getParameters(), StatsSetupConst.FALSE);
++            LOG.info("Removed COLUMN_STATS_ACCURATE from Table's parameters.");
++          }
++        }
+       }
+       commited = commitTransaction();
+     } finally {
+       if (!commited) {
+         rollbackTransaction();
+       }
+     }
+     return tbl;
+   }
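  // Illustrative call (identifiers assumed): a query that supplies its snapshot
  // gets back a stats-compliance flag telling it whether the stored table stats
  // are valid under that snapshot.
  //
  //   Table t = store.getTable("hive", "db1", "acid_tbl", txnId, writeIdList);
  //   if (t.isSetIsStatsCompliant() && !t.isIsStatsCompliant()) {
  //     // stats were written outside this query's snapshot; do not trust them
  //   }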
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern)
+       throws MetaException {
+     return getTables(catName, dbName, pattern, null);
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
+       throws MetaException {
+     try {
+       // We only support pattern matching via JDO, since Java's pattern matching
+       // might differ from that used by the metastore backends
+       return getTablesInternal(catName, dbName, pattern, tableType,
+           (pattern == null || pattern.equals(".*")), true);
+     } catch (NoSuchObjectException e) {
+       throw new MetaException(ExceptionUtils.getStackTrace(e));
+     }
+   }
+ 
+   @Override
+   public List<TableName> getTableNamesWithStats() throws MetaException, NoSuchObjectException {
+     return new GetListHelper<TableName>(null, null, null, true, false) {
+       @Override
+       protected List<TableName> getSqlResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         return directSql.getTableNamesWithStats();
+       }
+ 
+       @Override
+       protected List<TableName> getJdoResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         throw new UnsupportedOperationException("getTableNamesWithStats via JDO is not implemented"); // TODO: implement?
+       }
+     }.run(false);
+   }
+ 
+   @Override
+   public Map<String, List<String>> getPartitionColsWithStats(String catName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException {
+     return new GetHelper<Map<String, List<String>>>(catName, dbName, null, true, false) {
+       @Override
+       protected Map<String, List<String>> getSqlResult(
+           GetHelper<Map<String, List<String>>> ctx) throws MetaException {
+         try {
+           return directSql.getColAndPartNamesWithStats(catName, dbName, tableName);
+         } catch (Throwable ex) {
+           LOG.error("DirectSQL failed", ex);
+           throw new MetaException(ex.getMessage());
+         }
+       }
+ 
+       @Override
+       protected Map<String, List<String>> getJdoResult(
+           GetHelper<Map<String, List<String>>> ctx) throws MetaException {
+         throw new UnsupportedOperationException("getPartitionColsWithStats via JDO is not implemented"); // TODO: implement?
+       }
+ 
+       @Override
+       protected String describeResult() {
+         return results.size() + " partitions";
+       }
+     }.run(false);
+   }
+ 
+   @Override
+   public List<TableName> getAllTableNamesForStats() throws MetaException, NoSuchObjectException {
+     return new GetListHelper<TableName>(null, null, null, true, false) {
+       @Override
+       protected List<TableName> getSqlResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         return directSql.getAllTableNamesForStats();
+       }
+ 
+       @Override
+       protected List<TableName> getJdoResult(
+           GetHelper<List<TableName>> ctx) throws MetaException {
+         boolean commited = false;
+         Query query = null;
+         List<TableName> result = new ArrayList<>();
+         openTransaction();
+         try {
+           String paramStr = "", whereStr = "";
+           for (int i = 0; i < MetaStoreDirectSql.STATS_TABLE_TYPES.length; ++i) {
+             if (i != 0) {
+               paramStr += ", ";
+               whereStr += "||";
+             }
+             paramStr += "java.lang.String tt" + i;
+             whereStr += " tableType == tt" + i;
+           }
+           query = pm.newQuery(MTable.class, whereStr);
+           query.declareParameters(paramStr);
+           @SuppressWarnings("unchecked")
+           Collection<MTable> tbls = (Collection<MTable>) query.executeWithArray(
+               (Object[]) MetaStoreDirectSql.STATS_TABLE_TYPES);
+           pm.retrieveAll(tbls);
+           for (MTable tbl : tbls) {
+             result.add(new TableName(
+                 tbl.getDatabase().getCatalogName(), tbl.getDatabase().getName(), tbl.getTableName()));
+           }
+           commited = commitTransaction();
+         } finally {
+           rollbackAndCleanup(commited, query);
+         }
+         return result;
+       }
+     }.run(false);
+   }
+ 
+   protected List<String> getTablesInternal(String catName, String dbName, String pattern,
+                                            TableType tableType, boolean allowSql, boolean allowJdo)
+       throws MetaException, NoSuchObjectException {
+     final String db_name = normalizeIdentifier(dbName);
+     final String cat_name = normalizeIdentifier(catName);
+     return new GetListHelper<String>(cat_name, dbName, null, allowSql, allowJdo) {
+       @Override
+       protected List<String> getSqlResult(GetHelper<List<String>> ctx)
+               throws MetaException {
+         return directSql.getTables(cat_name, db_name, tableType);
+       }
+ 
+       @Override
+       protected List<String> getJdoResult(GetHelper<List<String>> ctx)
+               throws MetaException, NoSuchObjectException {
+         return getTablesInternalViaJdo(cat_name, db_name, pattern, tableType);
+       }
+     }.run(false);
+   }
+ 
+   private List<String> getTablesInternalViaJdo(String catName, String dbName, String pattern,
+                                                TableType tableType) throws MetaException {
+     boolean commited = false;
+     Query query = null;
+     List<String> tbls = null;
+     try {
+       openTransaction();
+       dbName = normalizeIdentifier(dbName);
+       // Take the pattern and split it on the | to get all the composing
+       // patterns
+       List<String> parameterVals = new ArrayList<>();
+       StringBuilder filterBuilder = new StringBuilder();
+       //adds database.name == dbName to the filter
+       appendSimpleCondition(filterBuilder, "database.name", new String[] {dbName}, parameterVals);
+       appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals);
+       if(pattern != null) {
+         appendPatternCondition(filterBuilder, "tableName", pattern, parameterVals);
+       }
+       if(tableType != null) {
+         appendPatternCondition(filterBuilder, "tableType", new String[] {tableType.toString()}, parameterVals);
+       }
+ 
+       query = pm.newQuery(MTable.class, filterBuilder.toString());
+       query.setResult("tableName");
+       query.setOrdering("tableName ascending");
+       Collection<String> names = (Collection<String>) query.executeWithArray(parameterVals.toArray(new String[0]));
+       tbls = new ArrayList<>(names);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return tbls;
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException {
+     final String db_name = normalizeIdentifier(dbName);
+     catName = normalizeIdentifier(catName);
+     boolean commited = false;
+     Query<?> query = null;
+     List<String> tbls = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(MTable.class,
+           "database.name == db && database.catalogName == cat && tableType == tt && rewriteEnabled == re");
+       query.declareParameters(
+           "java.lang.String db, java.lang.String cat, java.lang.String tt, boolean re");
+       query.setResult("tableName");
+       Collection<String> names = (Collection<String>) query.executeWithArray(
+           db_name, catName, TableType.MATERIALIZED_VIEW.toString(), true);
+       tbls = new ArrayList<>(names);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return tbls;
+   }
+ 
+   @Override
+   public int getDatabaseCount() throws MetaException {
+     return getObjectCount("name", MDatabase.class.getName());
+   }
+ 
+   @Override
+   public int getPartitionCount() throws MetaException {
+     return getObjectCount("partitionName", MPartition.class.getName());
+   }
+ 
+   @Override
+   public int getTableCount() throws MetaException {
+     return getObjectCount("tableName", MTable.class.getName());
+   }
+ 
+   private int getObjectCount(String fieldName, String objName) {
+     Long result = 0L;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       String queryStr =
+         "select count(" + fieldName + ") from " + objName;
+       query = pm.newQuery(queryStr);
+       result = (Long) query.execute();
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return result.intValue();
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                       List<String> tableTypes) throws MetaException {
+ 
+     boolean commited = false;
+     Query query = null;
+     List<TableMeta> metas = new ArrayList<>();
+     try {
+       openTransaction();
+       // Take the pattern and split it on the | to get all the composing
+       // patterns
+       StringBuilder filterBuilder = new StringBuilder();
+       List<String> parameterVals = new ArrayList<>();
+       appendSimpleCondition(filterBuilder, "database.catalogName", new String[] {catName}, parameterVals);
+       if (dbNames != null && !dbNames.equals("*")) {
+         appendPatternCondition(filterBuilder, "database.name", dbNames, parameterVals);
+       }
+       if (tableNames != null && !tableNames.equals("*")) {
+         appendPatternCondition(filterBuilder, "tableName", tableNames, parameterVals);
+       }
+       if (tableTypes != null && !tableTypes.isEmpty()) {
+         appendSimpleCondition(filterBuilder, "tableType", tableTypes.toArray(new String[0]), parameterVals);
+       }
+ 
+       if (LOG.isDebugEnabled()) {
+         LOG.debug("getTableMeta with filter " + filterBuilder.toString() + " params: " +
+             StringUtils.join(parameterVals, ", "));
+       }
+       query = pm.newQuery(MTable.class, filterBuilder.toString());
+       Collection<MTable> tables = (Collection<MTable>) query.executeWithArray(parameterVals.toArray(new String[parameterVals.size()]));
+       for (MTable table : tables) {
+         TableMeta metaData = new TableMeta(
+             table.getDatabase().getName(), table.getTableName(), table.getTableType());
+         metaData.setComments(table.getParameters().get("comment"));
+         metas.add(metaData);
+       }
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return metas;
+   }
+ 
+   private StringBuilder appendPatternCondition(StringBuilder filterBuilder, String fieldName,
+       String[] elements, List<String> parameterVals) {
+     return appendCondition(filterBuilder, fieldName, elements, true, parameterVals);
+   }
+ 
+   private StringBuilder appendPatternCondition(StringBuilder builder,
+       String fieldName, String elements, List<String> parameters) {
+     elements = normalizeIdentifier(elements);
+     return appendCondition(builder, fieldName, elements.split("\\|"), true, parameters);
+   }
+ 
+   private StringBuilder appendSimpleCondition(StringBuilder builder,
+       String fieldName, String[] elements, List<String> parameters) {
+     return appendCondition(builder, fieldName, elements, false, parameters);
+   }
+ 
+   private StringBuilder appendCondition(StringBuilder builder,
+       String fieldName, String[] elements, boolean pattern, List<String> parameters) {
+     if (builder.length() > 0) {
+       builder.append(" && ");
+     }
+     builder.append(" (");
+     int length = builder.length();
+     for (String element : elements) {
+       if (pattern) {
+         element = "(?i)" + element.replaceAll("\\*", ".*");
+       }
+       parameters.add(element);
+       if (builder.length() > length) {
+         builder.append(" || ");
+       }
+       builder.append(fieldName);
+       if (pattern) {
+         builder.append(".matches(").append(JDO_PARAM).append(parameters.size()).append(")");
+       } else {
+         builder.append(" == ").append(JDO_PARAM).append(parameters.size());
+       }
+     }
+     builder.append(" )");
+     return builder;
+   }
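  // Worked example (assuming JDO_PARAM is ":param"): appendPatternCondition(b,
  // "database.name", "db1|d*2", params) splits on '|', rewrites '*' to '.*', adds
  // a case-insensitive prefix, and appends
  //
  //    (database.name.matches(:param1) || database.name.matches(:param2) )
  //
  // with params = ["(?i)db1", "(?i)d.*2"].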
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws MetaException {
+     return getTables(catName, dbName, ".*");
+   }
+ 
+   class AttachedMTableInfo {
+     MTable mtbl;
+     MColumnDescriptor mcd;
+ 
+     public AttachedMTableInfo() {}
+ 
+     public AttachedMTableInfo(MTable mtbl, MColumnDescriptor mcd) {
+       this.mtbl = mtbl;
+       this.mcd = mcd;
+     }
+   }
+ 
+   private AttachedMTableInfo getMTable(String catName, String db, String table,
+                                        boolean retrieveCD) {
+     AttachedMTableInfo nmtbl = new AttachedMTableInfo();
+     MTable mtbl = null;
+     boolean commited = false;
+     Query query = null;
+     try {
+       openTransaction();
+       catName = normalizeIdentifier(catName);
+       db = normalizeIdentifier(db);
+       table = normalizeIdentifier(table);
+       query = pm.newQuery(MTable.class,
+           "tableName == table && database.name == db && database.catalogName == catname");
+       query.declareParameters(
+           "java.lang.String table, java.lang.String db, java.lang.String catname");
+       query.setUnique(true);
+       LOG.debug("Executing getMTable for " +
+           TableName.getQualified(catName, db, table));
+       mtbl = (MTable) query.execute(table, db, catName);
+       pm.retrieve(mtbl);
+       // Retrieving CD can be expensive and unnecessary, so do it only when required.
+       if (mtbl != null && retrieveCD) {
+         pm.retrieve(mtbl.getSd());
+         pm.retrieveAll(mtbl.getSd().getCD());
+         nmtbl.mcd = mtbl.getSd().getCD();
+       }
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     nmtbl.mtbl = mtbl;
+     return nmtbl;
+   }
+ 
+   private MCreationMetadata getCreationMetadata(String catName, String dbName, String tblName) {
+     boolean commited = false;
+     MCreationMetadata mcm = null;
+     Query query = null;
+     try {
+       openTransaction();
+       query = pm.newQuery(
+           MCreationMetadata.class, "tblName == table && dbName == db && catalogName == cat");
+       query.declareParameters("java.lang.String table, java.lang.String db, java.lang.String cat");
+       query.setUnique(true);
+       mcm = (MCreationMetadata) query.execute(tblName, dbName, catName);
+       pm.retrieve(mcm);
+       commited = commitTransaction();
+     } finally {
+       rollbackAndCleanup(commited, query);
+     }
+     return mcm;
+   }
+ 
+   private MTable getMTable(String catName, String db, String table) {
+     AttachedMTableInfo nmtbl = getMTable(catName, db, table, false);
+     return nmtbl.mtbl;
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String db, List<String> tbl_names)
+       throws MetaException, UnknownDBException {
+     List<Table> tables = new ArrayList<>();
+     boolean committed = false;
+     Query dbExistsQuery = null;
+     Query query = null;
+     try {
+       openTransaction();
+       db = normalizeIdentifier(db);
+       catName = normalizeIdentifier(catName);
+ 
+       List<String> lowered_tbl_names = new ArrayList<>(tbl_names.size());
+       for (String t : tbl_names) {
+         lowered_tbl_names.add(normalizeIdentifier(t));
+       }
+       query = pm.newQuery(MTable.class);
+       query.setFilter("database.name == db && database.catalogName == cat && tbl_names.contains(tableName)");
+       query.declareParameters("java.lang.String db, java.lang.String cat, java.util.Collection tbl_names");
+       Collection mtables = (Collection) query.execute(db, catName, lowered_tbl_names);
+       if (mtables == null || mtables.isEmpty()) {
+         // Need to differentiate between an unmatched pattern and a non-existent database
+         dbExistsQuery = pm.newQuery(MDatabase.class, "name == db && catalogName == cat");
+         dbExistsQuery.declareParameters("java.lang.String db, java.lang.String cat");
+         dbExistsQuery.setUnique(true);
+         dbExistsQuery.setResult("name");
+         String dbNameIfExists = (String) dbExistsQuery.execute(db, catName);
+         if (org.apache.commons.lang.StringUtils.isEmpty(dbNameIfExists)) {
+           throw new UnknownDBException("Could not find database " +
+               DatabaseName.getQualified(catName, db));
+         }
+       } else {
+         for (Iterator iter = mtables.iterator(); iter.hasNext(); ) {
+           Table tbl = convertToTable((MTable) iter.next());
+           // Retrieve creation metadata if needed
+           if (TableType.MATERIALIZED_VIEW.toString().equals(tbl.getTableType())) {
+             tbl.setCreationMetadata(
+                 convertToCreationMetadata(
+                     getCreationMetadata(tbl.getCatName(), tbl.getDbName(), tbl.getTableName())));
+           }
+           tables.add(tbl);
+         }
+       }
+       committed = commitTransaction();
+     } finally {
+       rollbackAndCleanup(committed, query);
+       if (dbExistsQuery != null) {
+         dbExistsQuery.closeAll();
+       }
+     }
+     return tables;
+   }
+ 
+   /** Makes shallow copy of a list to avoid DataNucleus mucking with our objects. */
+   private <T> List<T> convertList(List<T> dnList) {
+     return (dnList == null) ? null : Lists.newArrayList(dnList);
+   }
+ 
+   /** Makes shallow copy of a map to avoid DataNucleus mucking with our objects. */
+   private Map<String, String> convertMap(Map<String, String> dnMap) {
+     return MetaStoreUtils.trimMapNulls(dnMap,
+         MetastoreConf.getBoolVar(getConf(), ConfVars.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS));
+   }
+ 
+   private Table convertToTable(MTable mtbl) throws MetaException {
+     if (mtbl == null) {
+       return null;
+     }
+     String tableType = mtbl.getTableType();
+     if (tableType == null) {
+       // for backwards compatibility with old metastore persistence
+       if (mtbl.getViewOriginalText() != null) {
+         tableType = TableType.VIRTUAL_VIEW.toString();
+       } else if (Boolean.parseBoolean(mtbl.getParameters().get("EXTERNAL"))) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       } else {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+     final Table t = new Table(mtbl.getTableName(), mtbl.getDatabase().getName(), mtbl
+         .getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl
+         .getRetention(), convertToStorageDescriptor(mtbl.getSd()),
+         convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()),
+         mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType);
+ 
+     if (Strings.isNullOrEmpty(mtbl.getOwnerType())) {
+       // Before the ownerType exists in an old Hive schema, USER was the default type for owner.
+       // Let's set the default to USER to keep backward compatibility.
+       t.setOwnerType(PrincipalType.USER);
+     } else {
+       t.setOwnerType(PrincipalType.valueOf(mtbl.getOwnerType()));
+     }
+ 
+     t.setRewriteEnabled(mtbl.isRewriteEnabled());
+     t.setCatName(mtbl.getDatabase().getCatalogName());
+     return t;
+   }
+ 
+   private MTable convertToMTable(Table tbl) throws InvalidObjectException,
+       MetaException {
+     if (tbl == null) {
+       return null;
+     }
+     MDatabase mdb = null;
+     String catName = tbl.isSetCatName() ? tbl.getCatName() : getDefaultCatalog(conf);
+     try {
+       mdb = getMDatabase(catName, tbl.getDbName());
+     } catch (NoSuchObjectException e) {
+       LOG.error("Could not convert to MTable", e);
+       throw new InvalidObjectException("Database " +
+           DatabaseName.getQualified(catName, tbl.getDbName()) + " doesn't exist.");
+     }
+ 
+     // If the table has property EXTERNAL set, update table type
+     // accordingly
+     String tableType = tbl.getTableType();
+     boolean isExternal = Boolean.parseBoolean(tbl.getParameters().get("EXTERNAL"));
+     if (TableType.MANAGED_TABLE.toString().equals(tableType)) {
+       if (isExternal) {
+         tableType = TableType.EXTERNAL_TABLE.toString();
+       }
+     }
+     if (TableType.EXTERNAL_TABLE.toString().equals(tableType)) {
+       if (!isExternal) {
+         tableType = TableType.MANAGED_TABLE.toString();
+       }
+     }
+ 
+     PrincipalType ownerPrincipalType = tbl.getOwnerType();
+     String ownerType = (ownerPrincipalType == null) ? PrincipalType.USER.name() : ownerPrincipalType.name();
+ 
+     // A new table is always created with a new column descriptor
 -    return new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
++    MTable mtable = new MTable(normalizeIdentifier(tbl.getTableName()), mdb,
+         convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), ownerType, tbl
+         .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(),
+         convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(),
+         tbl.getViewOriginalText(), tbl.getViewExpandedText(), tbl.isRewriteEnabled(),
+         tableType);
++    if (TxnUtils.isTransactionalTable(tbl)) {
++      mtable.setWriteId(tbl.getWriteId());
++    }
++    return mtable;
+   }
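  // Example of the reconciliation above (values assumed): a Table sent as
  // MANAGED_TABLE whose parameters contain EXTERNAL=TRUE is persisted as
  // EXTERNAL_TABLE, and an EXTERNAL_TABLE without the flag is persisted as
  // MANAGED_TABLE, so the stored type always agrees with the EXTERNAL parameter.
  // For a transactional table the write ID is copied as well:
  //
  //   MTable m = convertToMTable(t);  // t: tableType=MANAGED_TABLE, EXTERNAL=TRUE
  //   // m.getTableType() == "EXTERNAL_TABLE"; m.getWriteId() set iff t is transactional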
+ 
+   private List<MFieldSchema> convertToMFieldSchemas(List<FieldSchema> keys) {
+     List<MFieldSchema> mkeys = null;
+     if (keys != null) {
+       mkeys = new ArrayList<>(keys.size());
+       for (FieldSchema part : keys) {
+         mkeys.add(new MFieldSchema(part.getName().toLowerCase(),
+             part.getType(), part.getComment()));
+       }
+     }
+     return mkeys;
+   }
+ 
+   private List<FieldSchema> convertToFieldSchemas(List<MFieldSchema> mkeys) {
+     List<FieldSchema> keys = null;
+     if (mkeys != null) {
+       keys = new ArrayList<>(mkeys.size());
+       for (MFieldSchema part : mkeys) {
+         keys.add(new FieldSchema(part.getName(), part.getType(), part
+             .getComment()));
+       }
+     }
+     return keys;
+   }
+ 
+   private List<MOrder> convertToMOrders(List<Order> keys) {
+     List<MOrder> mkeys = null;
+     if (keys != null) {
+       mkeys = new ArrayList<>(keys.size());
+       for (Order part : keys) {
+         mkeys.add(new MOrder(normalizeIdentifier(part.getCol()), part.getOrder()));
+       }
+     }
+     return mkeys;
+   }
+ 
+   private List<Order> convertToOrders(List<MOrder> mkeys) {
+     List<Order> keys = null;
+     if (mkeys != null) {
+       keys = new ArrayList<>(mkeys.size());
+       for (MOrder part : mkeys) {
+         keys.add(new Order(part.getCol(), part.getOrder()));
+       }
+     }
+     return keys;
+   }
+ 
+   private SerDeInfo convertToSerDeInfo(MSerDeInfo ms) throws MetaException {
+     if (ms == null) {
+       throw new MetaException("Invalid SerDeInfo object");
+     }
+     SerDeInfo serde =
+         new SerDeInfo(ms.getName(), ms.getSerializationLib(), convertMap(ms.getParameters()));
+     if (ms.getDescription() != null) {
+       serde.setDescription(ms.getDescription());
+     }
+     if (ms.getSerializerClass() != null) {
+       serde.setSerializerClass(ms.getSerializerClass());
+     }
+     if (ms.getDeserializerClass() != null) {
+       serde.setDeserializerClass(ms.getDeserializerClass());
+     }
+     if (ms.getSerdeType() > 0) {
+       serde.setSerdeType(SerdeType.findByValue(ms.getSerdeType()));
+     }
+     return serde;
+   }
+ 
+   private MSerDeInfo convertToMSerDeInfo(SerDeInfo ms) throws MetaException {
+     if (ms == null) {
+       throw new MetaException("Invalid SerDeInfo object");
+     }
+     return new MSerDeInfo(ms.getName(), ms.getSerializationLib(), ms.getParameters(),
+         ms.getDescription(), ms.getSerializerClass(), ms.getDeserializerClass(),
+       

<TRUNCATED>

[19/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java
new file mode 100644
index 0000000..6f0e052
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchemaName.java
@@ -0,0 +1,603 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ISchemaName implements org.apache.thrift.TBase<ISchemaName, ISchemaName._Fields>, java.io.Serializable, Cloneable, Comparable<ISchemaName> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ISchemaName");
+
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ISchemaNameStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ISchemaNameTupleSchemeFactory());
+  }
+
+  private String catName; // required
+  private String dbName; // required
+  private String schemaName; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CAT_NAME((short)1, "catName"),
+    DB_NAME((short)2, "dbName"),
+    SCHEMA_NAME((short)3, "schemaName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CAT_NAME
+          return CAT_NAME;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // SCHEMA_NAME
+          return SCHEMA_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ISchemaName.class, metaDataMap);
+  }
+
+  public ISchemaName() {
+  }
+
+  public ISchemaName(
+    String catName,
+    String dbName,
+    String schemaName)
+  {
+    this();
+    this.catName = catName;
+    this.dbName = dbName;
+    this.schemaName = schemaName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ISchemaName(ISchemaName other) {
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetSchemaName()) {
+      this.schemaName = other.schemaName;
+    }
+  }
+
+  public ISchemaName deepCopy() {
+    return new ISchemaName(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catName = null;
+    this.dbName = null;
+    this.schemaName = null;
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getSchemaName() {
+    return this.schemaName;
+  }
+
+  public void setSchemaName(String schemaName) {
+    this.schemaName = schemaName;
+  }
+
+  public void unsetSchemaName() {
+    this.schemaName = null;
+  }
+
+  /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaName() {
+    return this.schemaName != null;
+  }
+
+  public void setSchemaNameIsSet(boolean value) {
+    if (!value) {
+      this.schemaName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case SCHEMA_NAME:
+      if (value == null) {
+        unsetSchemaName();
+      } else {
+        setSchemaName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CAT_NAME:
+      return getCatName();
+
+    case DB_NAME:
+      return getDbName();
+
+    case SCHEMA_NAME:
+      return getSchemaName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CAT_NAME:
+      return isSetCatName();
+    case DB_NAME:
+      return isSetDbName();
+    case SCHEMA_NAME:
+      return isSetSchemaName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ISchemaName)
+      return this.equals((ISchemaName)that);
+    return false;
+  }
+
+  public boolean equals(ISchemaName that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_schemaName = true && this.isSetSchemaName();
+    boolean that_present_schemaName = true && that.isSetSchemaName();
+    if (this_present_schemaName || that_present_schemaName) {
+      if (!(this_present_schemaName && that_present_schemaName))
+        return false;
+      if (!this.schemaName.equals(that.schemaName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_schemaName = true && (isSetSchemaName());
+    list.add(present_schemaName);
+    if (present_schemaName)
+      list.add(schemaName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ISchemaName other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(other.isSetSchemaName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, other.schemaName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ISchemaName(");
+    boolean first = true;
+
+    sb.append("catName:");
+    if (this.catName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("schemaName:");
+    if (this.schemaName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.schemaName);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ISchemaNameStandardSchemeFactory implements SchemeFactory {
+    public ISchemaNameStandardScheme getScheme() {
+      return new ISchemaNameStandardScheme();
+    }
+  }
+
+  private static class ISchemaNameStandardScheme extends StandardScheme<ISchemaName> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ISchemaName struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // SCHEMA_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.schemaName = iprot.readString();
+              struct.setSchemaNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ISchemaName struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catName != null) {
+        oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+        oprot.writeString(struct.catName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.schemaName != null) {
+        oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC);
+        oprot.writeString(struct.schemaName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ISchemaNameTupleSchemeFactory implements SchemeFactory {
+    public ISchemaNameTupleScheme getScheme() {
+      return new ISchemaNameTupleScheme();
+    }
+  }
+
+  private static class ISchemaNameTupleScheme extends TupleScheme<ISchemaName> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ISchemaName struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetDbName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetSchemaName()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+      if (struct.isSetDbName()) {
+        oprot.writeString(struct.dbName);
+      }
+      if (struct.isSetSchemaName()) {
+        oprot.writeString(struct.schemaName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ISchemaName struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.dbName = iprot.readString();
+        struct.setDbNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.schemaName = iprot.readString();
+        struct.setSchemaNameIsSet(true);
+      }
+    }
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
new file mode 100644
index 0000000..4a9824b
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
@@ -0,0 +1,855 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class InsertEventRequestData implements org.apache.thrift.TBase<InsertEventRequestData, InsertEventRequestData._Fields>, java.io.Serializable, Cloneable, Comparable<InsertEventRequestData> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InsertEventRequestData");
+
+  private static final org.apache.thrift.protocol.TField REPLACE_FIELD_DESC = new org.apache.thrift.protocol.TField("replace", org.apache.thrift.protocol.TType.BOOL, (short)1);
+  private static final org.apache.thrift.protocol.TField FILES_ADDED_FIELD_DESC = new org.apache.thrift.protocol.TField("filesAdded", org.apache.thrift.protocol.TType.LIST, (short)2);
+  private static final org.apache.thrift.protocol.TField FILES_ADDED_CHECKSUM_FIELD_DESC = new org.apache.thrift.protocol.TField("filesAddedChecksum", org.apache.thrift.protocol.TType.LIST, (short)3);
+  private static final org.apache.thrift.protocol.TField SUB_DIRECTORY_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("subDirectoryList", org.apache.thrift.protocol.TType.LIST, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new InsertEventRequestDataStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new InsertEventRequestDataTupleSchemeFactory());
+  }
+
+  private boolean replace; // optional
+  private List<String> filesAdded; // required
+  private List<String> filesAddedChecksum; // optional
+  private List<String> subDirectoryList; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    REPLACE((short)1, "replace"),
+    FILES_ADDED((short)2, "filesAdded"),
+    FILES_ADDED_CHECKSUM((short)3, "filesAddedChecksum"),
+    SUB_DIRECTORY_LIST((short)4, "subDirectoryList");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // REPLACE
+          return REPLACE;
+        case 2: // FILES_ADDED
+          return FILES_ADDED;
+        case 3: // FILES_ADDED_CHECKSUM
+          return FILES_ADDED_CHECKSUM;
+        case 4: // SUB_DIRECTORY_LIST
+          return SUB_DIRECTORY_LIST;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __REPLACE_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.REPLACE,_Fields.FILES_ADDED_CHECKSUM,_Fields.SUB_DIRECTORY_LIST};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.REPLACE, new org.apache.thrift.meta_data.FieldMetaData("replace", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.FILES_ADDED, new org.apache.thrift.meta_data.FieldMetaData("filesAdded", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.FILES_ADDED_CHECKSUM, new org.apache.thrift.meta_data.FieldMetaData("filesAddedChecksum", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.SUB_DIRECTORY_LIST, new org.apache.thrift.meta_data.FieldMetaData("subDirectoryList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InsertEventRequestData.class, metaDataMap);
+  }
+
+  public InsertEventRequestData() {
+  }
+
+  public InsertEventRequestData(
+    List<String> filesAdded)
+  {
+    this();
+    this.filesAdded = filesAdded;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public InsertEventRequestData(InsertEventRequestData other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.replace = other.replace;
+    if (other.isSetFilesAdded()) {
+      List<String> __this__filesAdded = new ArrayList<String>(other.filesAdded);
+      this.filesAdded = __this__filesAdded;
+    }
+    if (other.isSetFilesAddedChecksum()) {
+      List<String> __this__filesAddedChecksum = new ArrayList<String>(other.filesAddedChecksum);
+      this.filesAddedChecksum = __this__filesAddedChecksum;
+    }
+    if (other.isSetSubDirectoryList()) {
+      List<String> __this__subDirectoryList = new ArrayList<String>(other.subDirectoryList);
+      this.subDirectoryList = __this__subDirectoryList;
+    }
+  }
+
+  public InsertEventRequestData deepCopy() {
+    return new InsertEventRequestData(this);
+  }
+
+  @Override
+  public void clear() {
+    setReplaceIsSet(false);
+    this.replace = false;
+    this.filesAdded = null;
+    this.filesAddedChecksum = null;
+    this.subDirectoryList = null;
+  }
+
+  public boolean isReplace() {
+    return this.replace;
+  }
+
+  public void setReplace(boolean replace) {
+    this.replace = replace;
+    setReplaceIsSet(true);
+  }
+
+  public void unsetReplace() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REPLACE_ISSET_ID);
+  }
+
+  /** Returns true if field replace is set (has been assigned a value) and false otherwise */
+  public boolean isSetReplace() {
+    return EncodingUtils.testBit(__isset_bitfield, __REPLACE_ISSET_ID);
+  }
+
+  public void setReplaceIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REPLACE_ISSET_ID, value);
+  }
+
+  public int getFilesAddedSize() {
+    return (this.filesAdded == null) ? 0 : this.filesAdded.size();
+  }
+
+  public java.util.Iterator<String> getFilesAddedIterator() {
+    return (this.filesAdded == null) ? null : this.filesAdded.iterator();
+  }
+
+  public void addToFilesAdded(String elem) {
+    if (this.filesAdded == null) {
+      this.filesAdded = new ArrayList<String>();
+    }
+    this.filesAdded.add(elem);
+  }
+
+  public List<String> getFilesAdded() {
+    return this.filesAdded;
+  }
+
+  public void setFilesAdded(List<String> filesAdded) {
+    this.filesAdded = filesAdded;
+  }
+
+  public void unsetFilesAdded() {
+    this.filesAdded = null;
+  }
+
+  /** Returns true if field filesAdded is set (has been assigned a value) and false otherwise */
+  public boolean isSetFilesAdded() {
+    return this.filesAdded != null;
+  }
+
+  public void setFilesAddedIsSet(boolean value) {
+    if (!value) {
+      this.filesAdded = null;
+    }
+  }
+
+  public int getFilesAddedChecksumSize() {
+    return (this.filesAddedChecksum == null) ? 0 : this.filesAddedChecksum.size();
+  }
+
+  public java.util.Iterator<String> getFilesAddedChecksumIterator() {
+    return (this.filesAddedChecksum == null) ? null : this.filesAddedChecksum.iterator();
+  }
+
+  public void addToFilesAddedChecksum(String elem) {
+    if (this.filesAddedChecksum == null) {
+      this.filesAddedChecksum = new ArrayList<String>();
+    }
+    this.filesAddedChecksum.add(elem);
+  }
+
+  public List<String> getFilesAddedChecksum() {
+    return this.filesAddedChecksum;
+  }
+
+  public void setFilesAddedChecksum(List<String> filesAddedChecksum) {
+    this.filesAddedChecksum = filesAddedChecksum;
+  }
+
+  public void unsetFilesAddedChecksum() {
+    this.filesAddedChecksum = null;
+  }
+
+  /** Returns true if field filesAddedChecksum is set (has been assigned a value) and false otherwise */
+  public boolean isSetFilesAddedChecksum() {
+    return this.filesAddedChecksum != null;
+  }
+
+  public void setFilesAddedChecksumIsSet(boolean value) {
+    if (!value) {
+      this.filesAddedChecksum = null;
+    }
+  }
+
+  public int getSubDirectoryListSize() {
+    return (this.subDirectoryList == null) ? 0 : this.subDirectoryList.size();
+  }
+
+  public java.util.Iterator<String> getSubDirectoryListIterator() {
+    return (this.subDirectoryList == null) ? null : this.subDirectoryList.iterator();
+  }
+
+  public void addToSubDirectoryList(String elem) {
+    if (this.subDirectoryList == null) {
+      this.subDirectoryList = new ArrayList<String>();
+    }
+    this.subDirectoryList.add(elem);
+  }
+
+  public List<String> getSubDirectoryList() {
+    return this.subDirectoryList;
+  }
+
+  public void setSubDirectoryList(List<String> subDirectoryList) {
+    this.subDirectoryList = subDirectoryList;
+  }
+
+  public void unsetSubDirectoryList() {
+    this.subDirectoryList = null;
+  }
+
+  /** Returns true if field subDirectoryList is set (has been assigned a value) and false otherwise */
+  public boolean isSetSubDirectoryList() {
+    return this.subDirectoryList != null;
+  }
+
+  public void setSubDirectoryListIsSet(boolean value) {
+    if (!value) {
+      this.subDirectoryList = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case REPLACE:
+      if (value == null) {
+        unsetReplace();
+      } else {
+        setReplace((Boolean)value);
+      }
+      break;
+
+    case FILES_ADDED:
+      if (value == null) {
+        unsetFilesAdded();
+      } else {
+        setFilesAdded((List<String>)value);
+      }
+      break;
+
+    case FILES_ADDED_CHECKSUM:
+      if (value == null) {
+        unsetFilesAddedChecksum();
+      } else {
+        setFilesAddedChecksum((List<String>)value);
+      }
+      break;
+
+    case SUB_DIRECTORY_LIST:
+      if (value == null) {
+        unsetSubDirectoryList();
+      } else {
+        setSubDirectoryList((List<String>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case REPLACE:
+      return isReplace();
+
+    case FILES_ADDED:
+      return getFilesAdded();
+
+    case FILES_ADDED_CHECKSUM:
+      return getFilesAddedChecksum();
+
+    case SUB_DIRECTORY_LIST:
+      return getSubDirectoryList();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case REPLACE:
+      return isSetReplace();
+    case FILES_ADDED:
+      return isSetFilesAdded();
+    case FILES_ADDED_CHECKSUM:
+      return isSetFilesAddedChecksum();
+    case SUB_DIRECTORY_LIST:
+      return isSetSubDirectoryList();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof InsertEventRequestData)
+      return this.equals((InsertEventRequestData)that);
+    return false;
+  }
+
+  public boolean equals(InsertEventRequestData that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_replace = true && this.isSetReplace();
+    boolean that_present_replace = true && that.isSetReplace();
+    if (this_present_replace || that_present_replace) {
+      if (!(this_present_replace && that_present_replace))
+        return false;
+      if (this.replace != that.replace)
+        return false;
+    }
+
+    boolean this_present_filesAdded = true && this.isSetFilesAdded();
+    boolean that_present_filesAdded = true && that.isSetFilesAdded();
+    if (this_present_filesAdded || that_present_filesAdded) {
+      if (!(this_present_filesAdded && that_present_filesAdded))
+        return false;
+      if (!this.filesAdded.equals(that.filesAdded))
+        return false;
+    }
+
+    boolean this_present_filesAddedChecksum = true && this.isSetFilesAddedChecksum();
+    boolean that_present_filesAddedChecksum = true && that.isSetFilesAddedChecksum();
+    if (this_present_filesAddedChecksum || that_present_filesAddedChecksum) {
+      if (!(this_present_filesAddedChecksum && that_present_filesAddedChecksum))
+        return false;
+      if (!this.filesAddedChecksum.equals(that.filesAddedChecksum))
+        return false;
+    }
+
+    boolean this_present_subDirectoryList = true && this.isSetSubDirectoryList();
+    boolean that_present_subDirectoryList = true && that.isSetSubDirectoryList();
+    if (this_present_subDirectoryList || that_present_subDirectoryList) {
+      if (!(this_present_subDirectoryList && that_present_subDirectoryList))
+        return false;
+      if (!this.subDirectoryList.equals(that.subDirectoryList))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_replace = true && (isSetReplace());
+    list.add(present_replace);
+    if (present_replace)
+      list.add(replace);
+
+    boolean present_filesAdded = true && (isSetFilesAdded());
+    list.add(present_filesAdded);
+    if (present_filesAdded)
+      list.add(filesAdded);
+
+    boolean present_filesAddedChecksum = true && (isSetFilesAddedChecksum());
+    list.add(present_filesAddedChecksum);
+    if (present_filesAddedChecksum)
+      list.add(filesAddedChecksum);
+
+    boolean present_subDirectoryList = true && (isSetSubDirectoryList());
+    list.add(present_subDirectoryList);
+    if (present_subDirectoryList)
+      list.add(subDirectoryList);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(InsertEventRequestData other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetReplace()).compareTo(other.isSetReplace());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetReplace()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replace, other.replace);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFilesAdded()).compareTo(other.isSetFilesAdded());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFilesAdded()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filesAdded, other.filesAdded);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFilesAddedChecksum()).compareTo(other.isSetFilesAddedChecksum());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFilesAddedChecksum()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.filesAddedChecksum, other.filesAddedChecksum);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSubDirectoryList()).compareTo(other.isSetSubDirectoryList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSubDirectoryList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.subDirectoryList, other.subDirectoryList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("InsertEventRequestData(");
+    boolean first = true;
+
+    if (isSetReplace()) {
+      sb.append("replace:");
+      sb.append(this.replace);
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("filesAdded:");
+    if (this.filesAdded == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.filesAdded);
+    }
+    first = false;
+    if (isSetFilesAddedChecksum()) {
+      if (!first) sb.append(", ");
+      sb.append("filesAddedChecksum:");
+      if (this.filesAddedChecksum == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.filesAddedChecksum);
+      }
+      first = false;
+    }
+    if (isSetSubDirectoryList()) {
+      if (!first) sb.append(", ");
+      sb.append("subDirectoryList:");
+      if (this.subDirectoryList == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.subDirectoryList);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetFilesAdded()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'filesAdded' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class InsertEventRequestDataStandardSchemeFactory implements SchemeFactory {
+    public InsertEventRequestDataStandardScheme getScheme() {
+      return new InsertEventRequestDataStandardScheme();
+    }
+  }
+
+  private static class InsertEventRequestDataStandardScheme extends StandardScheme<InsertEventRequestData> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestData struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // REPLACE
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.replace = iprot.readBool();
+              struct.setReplaceIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // FILES_ADDED
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list732 = iprot.readListBegin();
+                struct.filesAdded = new ArrayList<String>(_list732.size);
+                String _elem733;
+                for (int _i734 = 0; _i734 < _list732.size; ++_i734)
+                {
+                  _elem733 = iprot.readString();
+                  struct.filesAdded.add(_elem733);
+                }
+                iprot.readListEnd();
+              }
+              struct.setFilesAddedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // FILES_ADDED_CHECKSUM
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list735 = iprot.readListBegin();
+                struct.filesAddedChecksum = new ArrayList<String>(_list735.size);
+                String _elem736;
+                for (int _i737 = 0; _i737 < _list735.size; ++_i737)
+                {
+                  _elem736 = iprot.readString();
+                  struct.filesAddedChecksum.add(_elem736);
+                }
+                iprot.readListEnd();
+              }
+              struct.setFilesAddedChecksumIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // SUB_DIRECTORY_LIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list738 = iprot.readListBegin();
+                struct.subDirectoryList = new ArrayList<String>(_list738.size);
+                String _elem739;
+                for (int _i740 = 0; _i740 < _list738.size; ++_i740)
+                {
+                  _elem739 = iprot.readString();
+                  struct.subDirectoryList.add(_elem739);
+                }
+                iprot.readListEnd();
+              }
+              struct.setSubDirectoryListIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequestData struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.isSetReplace()) {
+        oprot.writeFieldBegin(REPLACE_FIELD_DESC);
+        oprot.writeBool(struct.replace);
+        oprot.writeFieldEnd();
+      }
+      if (struct.filesAdded != null) {
+        oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size()));
+          for (String _iter741 : struct.filesAdded)
+          {
+            oprot.writeString(_iter741);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.filesAddedChecksum != null) {
+        if (struct.isSetFilesAddedChecksum()) {
+          oprot.writeFieldBegin(FILES_ADDED_CHECKSUM_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAddedChecksum.size()));
+            for (String _iter742 : struct.filesAddedChecksum)
+            {
+              oprot.writeString(_iter742);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.subDirectoryList != null) {
+        if (struct.isSetSubDirectoryList()) {
+          oprot.writeFieldBegin(SUB_DIRECTORY_LIST_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.subDirectoryList.size()));
+            for (String _iter743 : struct.subDirectoryList)
+            {
+              oprot.writeString(_iter743);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class InsertEventRequestDataTupleSchemeFactory implements SchemeFactory {
+    public InsertEventRequestDataTupleScheme getScheme() {
+      return new InsertEventRequestDataTupleScheme();
+    }
+  }
+
+  private static class InsertEventRequestDataTupleScheme extends TupleScheme<InsertEventRequestData> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.filesAdded.size());
+        for (String _iter744 : struct.filesAdded)
+        {
+          oprot.writeString(_iter744);
+        }
+      }
+      BitSet optionals = new BitSet();
+      if (struct.isSetReplace()) {
+        optionals.set(0);
+      }
+      if (struct.isSetFilesAddedChecksum()) {
+        optionals.set(1);
+      }
+      if (struct.isSetSubDirectoryList()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetReplace()) {
+        oprot.writeBool(struct.replace);
+      }
+      if (struct.isSetFilesAddedChecksum()) {
+        {
+          oprot.writeI32(struct.filesAddedChecksum.size());
+          for (String _iter745 : struct.filesAddedChecksum)
+          {
+            oprot.writeString(_iter745);
+          }
+        }
+      }
+      if (struct.isSetSubDirectoryList()) {
+        {
+          oprot.writeI32(struct.subDirectoryList.size());
+          for (String _iter746 : struct.subDirectoryList)
+          {
+            oprot.writeString(_iter746);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list747 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.filesAdded = new ArrayList<String>(_list747.size);
+        String _elem748;
+        for (int _i749 = 0; _i749 < _list747.size; ++_i749)
+        {
+          _elem748 = iprot.readString();
+          struct.filesAdded.add(_elem748);
+        }
+      }
+      struct.setFilesAddedIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.replace = iprot.readBool();
+        struct.setReplaceIsSet(true);
+      }
+      if (incoming.get(1)) {
+        {
+          org.apache.thrift.protocol.TList _list750 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.filesAddedChecksum = new ArrayList<String>(_list750.size);
+          String _elem751;
+          for (int _i752 = 0; _i752 < _list750.size; ++_i752)
+          {
+            _elem751 = iprot.readString();
+            struct.filesAddedChecksum.add(_elem751);
+          }
+        }
+        struct.setFilesAddedChecksumIsSet(true);
+      }
+      if (incoming.get(2)) {
+        {
+          org.apache.thrift.protocol.TList _list753 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.subDirectoryList = new ArrayList<String>(_list753.size);
+          String _elem754;
+          for (int _i755 = 0; _i755 < _list753.size; ++_i755)
+          {
+            _elem754 = iprot.readString();
+            struct.subDirectoryList.add(_elem754);
+          }
+        }
+        struct.setSubDirectoryListIsSet(true);
+      }
+    }
+  }
+
+}
+
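
For readers skimming these generated beans: a minimal sketch of how a client
might populate and round-trip InsertEventRequestData, assuming the standard
libthrift 0.9.x TSerializer/TDeserializer helpers (the demo class name and
file path are illustrative only):

    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.hadoop.hive.metastore.api.InsertEventRequestData;

    public class InsertEventRequestDataDemo {
      public static void main(String[] args) throws TException {
        InsertEventRequestData data = new InsertEventRequestData();
        data.addToFilesAdded("/warehouse/tbl/p=1/000000_0"); // required field
        data.setReplace(false);                              // optional field
        data.validate(); // throws if the required filesAdded list is unset

        // Round trip through the compact protocol used by the schemes above.
        byte[] bytes = new TSerializer(new TCompactProtocol.Factory())
            .serialize(data);
        InsertEventRequestData copy = new InsertEventRequestData();
        new TDeserializer(new TCompactProtocol.Factory())
            .deserialize(copy, bytes);
        assert data.equals(copy);
      }
    }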

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java
new file mode 100644
index 0000000..23c9aca
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidInputException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class InvalidInputException extends TException implements org.apache.thrift.TBase<InvalidInputException, InvalidInputException._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidInputException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidInputException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new InvalidInputExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new InvalidInputExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InvalidInputException.class, metaDataMap);
+  }
+
+  public InvalidInputException() {
+  }
+
+  public InvalidInputException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public InvalidInputException(InvalidInputException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public InvalidInputException deepCopy() {
+    return new InvalidInputException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof InvalidInputException)
+      return this.equals((InvalidInputException)that);
+    return false;
+  }
+
+  public boolean equals(InvalidInputException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(InvalidInputException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("InvalidInputException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class InvalidInputExceptionStandardSchemeFactory implements SchemeFactory {
+    public InvalidInputExceptionStandardScheme getScheme() {
+      return new InvalidInputExceptionStandardScheme();
+    }
+  }
+
+  private static class InvalidInputExceptionStandardScheme extends StandardScheme<InvalidInputException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidInputException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, InvalidInputException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class InvalidInputExceptionTupleSchemeFactory implements SchemeFactory {
+    public InvalidInputExceptionTupleScheme getScheme() {
+      return new InvalidInputExceptionTupleScheme();
+    }
+  }
+
+  private static class InvalidInputExceptionTupleScheme extends TupleScheme<InvalidInputException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, InvalidInputException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, InvalidInputException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
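
The writeObject/readObject hooks above make these Thrift exceptions work with
plain Java serialization by delegating to TCompactProtocol. A small sketch of
that round trip (demo class name and message are illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.apache.hadoop.hive.metastore.api.InvalidInputException;

    public class ExceptionSerDemo {
      public static void main(String[] args) throws Exception {
        InvalidInputException ex = new InvalidInputException("bad partition spec");

        // writeObject() internally writes the struct via TCompactProtocol.
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        ObjectOutputStream out = new ObjectOutputStream(bos);
        out.writeObject(ex);
        out.flush();

        // readObject() reads it back through the same protocol.
        ObjectInputStream in =
            new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()));
        InvalidInputException copy = (InvalidInputException) in.readObject();
        System.out.println(copy.getMessage()); // bad partition spec
      }
    }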

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java
new file mode 100644
index 0000000..c36d936
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidObjectException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class InvalidObjectException extends TException implements org.apache.thrift.TBase<InvalidObjectException, InvalidObjectException._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidObjectException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidObjectException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new InvalidObjectExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new InvalidObjectExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InvalidObjectException.class, metaDataMap);
+  }
+
+  public InvalidObjectException() {
+  }
+
+  public InvalidObjectException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public InvalidObjectException(InvalidObjectException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public InvalidObjectException deepCopy() {
+    return new InvalidObjectException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof InvalidObjectException)
+      return this.equals((InvalidObjectException)that);
+    return false;
+  }
+
+  public boolean equals(InvalidObjectException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(InvalidObjectException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("InvalidObjectException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class InvalidObjectExceptionStandardSchemeFactory implements SchemeFactory {
+    public InvalidObjectExceptionStandardScheme getScheme() {
+      return new InvalidObjectExceptionStandardScheme();
+    }
+  }
+
+  private static class InvalidObjectExceptionStandardScheme extends StandardScheme<InvalidObjectException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidObjectException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, InvalidObjectException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class InvalidObjectExceptionTupleSchemeFactory implements SchemeFactory {
+    public InvalidObjectExceptionTupleScheme getScheme() {
+      return new InvalidObjectExceptionTupleScheme();
+    }
+  }
+
+  private static class InvalidObjectExceptionTupleScheme extends TupleScheme<InvalidObjectException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, InvalidObjectException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, InvalidObjectException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
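
Each of these beans also carries the same _Fields machinery, which gives
metadata-driven tooling generic field access without reflection. An
illustrative sketch using InvalidObjectException (demo class name is
hypothetical):

    import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
    import org.apache.hadoop.hive.metastore.api.InvalidObjectException._Fields;

    public class FieldsDemo {
      public static void main(String[] args) {
        InvalidObjectException ex = new InvalidObjectException();

        // Look up the field descriptor by thrift id or by name.
        _Fields f = _Fields.findByThriftIdOrThrow(1); // MESSAGE
        assert f == _Fields.findByName("message");

        // Generic set / isSet / get, mirroring the typed accessors.
        ex.setFieldValue(f, "no such database: default");
        assert ex.isSet(f);
        System.out.println(ex.getFieldValue(f)); // no such database: default
      }
    }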


[68/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index 0000000,73a518d..681e1e5
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@@ -1,0 -1,1682 +1,1718 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.hive.common.TableName;
 -import org.apache.hadoop.hive.metastore.api.CreationMetadata;
 -import org.apache.hadoop.hive.metastore.api.ISchemaName;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
 -import org.apache.hadoop.hive.metastore.api.WMFullResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
++import org.apache.hadoop.hive.metastore.api.*;
+ 
+ import java.lang.annotation.ElementType;
+ import java.lang.annotation.Retention;
+ import java.lang.annotation.RetentionPolicy;
+ import java.lang.annotation.Target;
+ import java.nio.ByteBuffer;
+ import java.util.List;
+ import java.util.Map;
+ 
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configurable;
 -import org.apache.hadoop.hive.metastore.api.AggrStats;
 -import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
 -import org.apache.hadoop.hive.metastore.api.Catalog;
 -import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
 -import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
 -import org.apache.hadoop.hive.metastore.api.Database;
 -import org.apache.hadoop.hive.metastore.api.FieldSchema;
 -import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 -import org.apache.hadoop.hive.metastore.api.Function;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 -import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
 -import org.apache.hadoop.hive.metastore.api.ISchema;
 -import org.apache.hadoop.hive.metastore.api.InvalidInputException;
 -import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
 -import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
 -import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
 -import org.apache.hadoop.hive.metastore.api.MetaException;
 -import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
 -import org.apache.hadoop.hive.metastore.api.NotificationEvent;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
 -import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;
 -import org.apache.hadoop.hive.metastore.api.Partition;
 -import org.apache.hadoop.hive.metastore.api.PartitionEventType;
 -import org.apache.hadoop.hive.metastore.api.PartitionValuesResponse;
 -import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
 -import org.apache.hadoop.hive.metastore.api.PrincipalType;
 -import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 -import org.apache.hadoop.hive.metastore.api.Role;
 -import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
 -import org.apache.hadoop.hive.metastore.api.RuntimeStat;
 -import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
 -import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
 -import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 -import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;
 -import org.apache.hadoop.hive.metastore.api.SchemaVersion;
 -import org.apache.hadoop.hive.metastore.api.SerDeInfo;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableMeta;
 -import org.apache.hadoop.hive.metastore.api.Type;
 -import org.apache.hadoop.hive.metastore.api.UnknownDBException;
 -import org.apache.hadoop.hive.metastore.api.UnknownPartitionException;
 -import org.apache.hadoop.hive.metastore.api.UnknownTableException;
 -import org.apache.hadoop.hive.metastore.api.WMMapping;
 -import org.apache.hadoop.hive.metastore.api.WMNullablePool;
 -import org.apache.hadoop.hive.metastore.api.WMNullableResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMPool;
 -import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
 -import org.apache.hadoop.hive.metastore.api.WMTrigger;
 -import org.apache.hadoop.hive.metastore.api.WMValidateResourcePlanResponse;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.ColStatsObjWithSourceInfo;
+ import org.apache.thrift.TException;
+ 
+ public interface RawStore extends Configurable {
+ 
+   /**
+    * Annotation to skip retries
+    */
+   @Target(value = ElementType.METHOD)
+   @Retention(value = RetentionPolicy.RUNTIME)
+   @interface CanNotRetry {
+   }
+ 
+   void shutdown();
+ 
+   /**
+    * Opens a new transaction, or joins the one already open. Every call of this
+    * method must have a corresponding commit or rollback call.
+    *
+    * @return true if a transaction is now active
+    */
+ 
+   boolean openTransaction();
+ 
+   /**
+    * Commits the current transaction. An actual commit is performed only when this
+    * call matches the outermost openTransaction() call.
+    *
+    * @return whether the commit succeeded
+    */
+   @CanNotRetry
+   boolean commitTransaction();
+ 
+   boolean isActiveTransaction();
+ 
+   /**
+    * Rolls back the current transaction if it is active
+    */
+   @CanNotRetry
+   void rollbackTransaction();
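
A minimal usage sketch of the transaction contract above, assuming a hypothetical
RawStore implementation instance named "store":

    boolean committed = false;
    store.openTransaction();
    try {
      // ... metadata reads/writes against the same store ...
      committed = store.commitTransaction();
    } finally {
      if (!committed) {
        store.rollbackTransaction();  // rolls back only if still active
      }
    }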
+ 
+   /**
+    * Create a new catalog.
+    * @param cat Catalog to create.
+    * @throws MetaException if something goes wrong, usually in storing it to the database.
+    */
+   void createCatalog(Catalog cat) throws MetaException;
+ 
+   /**
+    * Alter an existing catalog.  Only description and location can be changed, and the change of
+    * location is for internal use only.
+    * @param catName name of the catalog to alter.
+    * @param cat new version of the catalog.
+    * @throws MetaException something went wrong, usually in the database.
+    * @throws InvalidOperationException attempt to change something about the catalog that is not
+    * changeable, like the name.
+    */
+   void alterCatalog(String catName, Catalog cat) throws MetaException, InvalidOperationException;
+ 
+   /**
+    * Get a catalog.
+    * @param catalogName name of the catalog.
+    * @return The catalog.
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws MetaException if something goes wrong, usually in reading it from the database.
+    */
+   Catalog getCatalog(String catalogName) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get all the catalogs.
+    * @return list of names of all catalogs in the system
+    * @throws MetaException if something goes wrong, usually in reading from the database.
+    */
+   List<String> getCatalogs() throws MetaException;
+ 
+   /**
+    * Drop a catalog.  The catalog must be empty.
+    * @param catalogName name of the catalog to drop.
+    * @throws NoSuchObjectException no catalog of this name exists.
+    * @throws MetaException could mean the catalog isn't empty, could mean general database error.
+    */
+   void dropCatalog(String catalogName) throws NoSuchObjectException, MetaException;
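
A sketch of the catalog lifecycle, assuming the Thrift-generated Catalog bean exposes
the usual generated setters; "store" is a hypothetical RawStore instance:

    Catalog cat = new Catalog();
    cat.setName("analytics");
    cat.setLocationUri("hdfs://nn:8020/warehouse/analytics");
    store.createCatalog(cat);
    Catalog fetched = store.getCatalog("analytics");
    store.dropCatalog("analytics");  // only legal while the catalog is empty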
+ 
+   /**
+    * Create a database.
+    * @param db database to create.
+    * @throws InvalidObjectException not sure it actually ever throws this.
+    * @throws MetaException if something goes wrong, usually in writing it to the database.
+    */
+   void createDatabase(Database db)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get a database.
+    * @param catalogName catalog the database is in.
+    * @param name name of the database.
+    * @return the database.
+    * @throws NoSuchObjectException if no such database exists.
+    */
+   Database getDatabase(String catalogName, String name)
+       throws NoSuchObjectException;
+ 
+   /**
+    * Drop a database.
+    * @param catalogName catalog the database is in.
+    * @param dbname name of the database.
+    * @return true if the database was dropped, pretty much always returns this if it returns.
+    * @throws NoSuchObjectException no database in this catalog of this name to drop
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   boolean dropDatabase(String catalogName, String dbname)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Alter a database.
+    * @param catalogName name of the catalog the database is in.
+    * @param dbname name of the database to alter
+    * @param db new version of the database.  This should be complete as it will fully replace the
+    *          existing db object.
+    * @return true if the change succeeds, could fail due to db constraint violations.
+    * @throws NoSuchObjectException no database of this name exists to alter.
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   boolean alterDatabase(String catalogName, String dbname, Database db)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get all database in a catalog having names that match a pattern.
+    * @param catalogName name of the catalog to search for databases in
+    * @param pattern pattern names should match
+    * @return list of matching database names.
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   List<String> getDatabases(String catalogName, String pattern) throws MetaException;
+ 
+   /**
+    * Get names of all the databases in a catalog.
+    * @param catalogName name of the catalog to search for databases in
+    * @return list of names of all databases in the catalog
+    * @throws MetaException something went wrong, usually with the database.
+    */
+   List<String> getAllDatabases(String catalogName) throws MetaException;
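
A sketch of the two listing calls above; the '*' wildcard in the pattern is an
assumption based on the usual metastore name-pattern syntax:

    List<String> all = store.getAllDatabases("hive");
    List<String> sales = store.getDatabases("hive", "sales*");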
+ 
+   boolean createType(Type type);
+ 
+   Type getType(String typeName);
+ 
+   boolean dropType(String typeName);
+ 
+   void createTable(Table tbl) throws InvalidObjectException,
+       MetaException;
+ 
+   /**
+    * Drop a table.
+    * @param catalogName catalog the table is in
+    * @param dbName database the table is in
+    * @param tableName table name
+    * @return true if the table was dropped
+    * @throws MetaException something went wrong, usually in the RDBMS or storage
+    * @throws NoSuchObjectException No table of this name
+    * @throws InvalidObjectException Don't think this is ever actually thrown
+    * @throws InvalidInputException Don't think this is ever actually thrown
+    */
+   boolean dropTable(String catalogName, String dbName, String tableName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Get a table object.
+    * @param catalogName catalog the table is in.
+    * @param dbName database the table is in.
+    * @param tableName table name.
+    * @return table object, or null if no such table exists (wow it would be nice if we either
+    * consistently returned null or consistently threw NoSuchObjectException).
+    * @throws MetaException something went wrong in the RDBMS
+    */
+   Table getTable(String catalogName, String dbName, String tableName) throws MetaException;
+ 
+   /**
++   * Get a table object.
++   * @param catalogName catalog the table is in.
++   * @param dbName database the table is in.
++   * @param tableName table name.
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return table object, or null if no such table exists (wow it would be nice if we either
++   * consistently returned null or consistently threw NoSuchObjectException).
++   * @throws MetaException something went wrong in the RDBMS
++   */
++  Table getTable(String catalogName, String dbName, String tableName,
++                 long txnId, String writeIdList) throws MetaException;
++
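
A sketch of the snapshot-aware overload above; txnId and writeIdList are assumed to
come from the caller's transaction state, and the exact encoding of the valid-write-id
string is defined outside this interface:

    Table snapshot = store.getTable("hive", "sales", "orders", txnId, writeIdList);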
++  /**
+    * Add a partition.
+    * @param part partition to add
+    * @return true if the partition was successfully added.
+    * @throws InvalidObjectException the provided partition object is not valid.
+    * @throws MetaException error writing to the RDBMS.
+    */
+   boolean addPartition(Partition part)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add a list of partitions to a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param parts list of partitions to be added.
+    * @return true if the operation succeeded.
+    * @throws InvalidObjectException never throws this AFAICT
+    * @throws MetaException the partitions don't belong to the indicated table or error writing to
+    * the RDBMS.
+    */
+   boolean addPartitions(String catName, String dbName, String tblName, List<Partition> parts)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add a list of partitions to a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partitionSpec specification for the partition
+    * @param ifNotExists if true, a partition that already exists is not an error; if false,
+    *                   it is.
+    * @return whether the partition was created.
+    * @throws InvalidObjectException The passed in partition spec or table specification is invalid.
+    * @throws MetaException error writing to RDBMS.
+    */
+   boolean addPartitions(String catName, String dbName, String tblName,
+                         PartitionSpecProxy partitionSpec, boolean ifNotExists)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get a partition.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param part_vals partition values for this table.
+    * @return the partition.
+    * @throws MetaException error reading from RDBMS.
+    * @throws NoSuchObjectException no partition matching this specification exists.
+    */
+   Partition getPartition(String catName, String dbName, String tableName,
+       List<String> part_vals) throws MetaException, NoSuchObjectException;
++  /**
++   * Get a partition.
++   * @param catName catalog name.
++   * @param dbName database name.
++   * @param tableName table name.
++   * @param part_vals partition values for this table.
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return the partition.
++   * @throws MetaException error reading from RDBMS.
++   * @throws NoSuchObjectException no partition matching this specification exists.
++   */
++  Partition getPartition(String catName, String dbName, String tableName,
++                         List<String> part_vals,
++                         long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Check whether a partition exists.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param partKeys list of partition keys used to generate the partition name.
+    * @param part_vals list of partition values.
+    * @return true if the partition exists, false otherwise.
+    * @throws MetaException failure reading RDBMS
+    * @throws NoSuchObjectException this is never thrown.
+    */
+   boolean doesPartitionExist(String catName, String dbName, String tableName,
+       List<FieldSchema> partKeys, List<String> part_vals)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Drop a partition.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param part_vals list of partition values.
+    * @return true if the partition was dropped.
+    * @throws MetaException Error accessing the RDBMS.
+    * @throws NoSuchObjectException no partition matching this description exists
+    * @throws InvalidObjectException error dropping the statistics for the partition
+    * @throws InvalidInputException error dropping the statistics for the partition
+    */
+   boolean dropPartition(String catName, String dbName, String tableName,
+       List<String> part_vals) throws MetaException, NoSuchObjectException, InvalidObjectException,
+       InvalidInputException;
+ 
+   /**
+    * Get some or all partitions for a table.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name
+    * @param max maximum number of partitions, or -1 to get all partitions.
+    * @return list of partitions
+    * @throws MetaException error access the RDBMS.
+    * @throws NoSuchObjectException no such table exists
+    */
+   List<Partition> getPartitions(String catName, String dbName,
+       String tableName, int max) throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get the location for every partition of a given table. If a partition location is a child of
+    * baseLocationToNotShow, then the partition name is still returned, but with a null location.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param baseLocationToNotShow Partition locations that are children of this path are omitted,
+    *     and a null value is returned instead.
+    * @param max The maximum number of partition locations returned, or -1 for all
+    * @return The map of the partitionName, location pairs
+    */
+   Map<String, String> getPartitionLocations(String catName, String dbName, String tblName,
+       String baseLocationToNotShow, int max);
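
A sketch of the call; passing null for baseLocationToNotShow is assumed to disable the
location filtering, and -1 requests all partitions:

    Map<String, String> locations =
        store.getPartitionLocations("hive", "sales", "orders", null, -1);
    // keys are partition names such as "ds=2018-07-13"; a value is null when filtered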
+ 
+   /**
+    * Alter a table.
+    * @param catName catalog the table is in.
+    * @param dbname database the table is in.
+    * @param name name of the table.
+    * @param newTable New table object.  Which parts of the table can be altered are
+    *                 implementation specific.
+    * @throws InvalidObjectException The new table object is invalid.
+    * @throws MetaException something went wrong, usually in the RDBMS or storage.
+    */
 -  void alterTable(String catName, String dbname, String name, Table newTable)
++  void alterTable(String catName, String dbname, String name, Table newTable,
++      long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Update creation metadata for a materialized view.
+    * @param catName catalog name.
+    * @param dbname database name.
+    * @param tablename table name.
+    * @param cm new creation metadata
+    * @throws MetaException error accessing the RDBMS.
+    */
+   void updateCreationMetadata(String catName, String dbname, String tablename, CreationMetadata cm)
+       throws MetaException;
+ 
+   /**
+    * Get table names that match a pattern.
+    * @param catName catalog to search in
+    * @param dbName database to search in
+    * @param pattern pattern to match
+    * @return list of table names, if any
+    * @throws MetaException failure in querying the RDBMS
+    */
+   List<String> getTables(String catName, String dbName, String pattern)
+       throws MetaException;
+ 
+   /**
+    * Get table names that match a pattern.
+    * @param catName catalog to search in
+    * @param dbName database to search in
+    * @param pattern pattern to match
+    * @param tableType type of table to look for
+    * @return list of table names, if any
+    * @throws MetaException failure in querying the RDBMS
+    */
+   List<String> getTables(String catName, String dbName, String pattern, TableType tableType)
+       throws MetaException;
+ 
+   /**
+    * Get list of materialized views in a database.
+    * @param catName catalog name
+    * @param dbName database name
+    * @return names of all materialized views in the database
+    * @throws MetaException error querying the RDBMS
+    * @throws NoSuchObjectException no such database
+    */
+   List<String> getMaterializedViewsForRewriting(String catName, String dbName)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get table meta information for tables matching the given search criteria.
+    * @param catName catalog name to search in. Search must be confined to one catalog.
+    * @param dbNames databases to search in.
+    * @param tableNames names of tables to select.
+    * @param tableTypes types of tables to look for.
+    * @return list of matching table meta information.
+    * @throws MetaException failure in querying the RDBMS.
+    */
+   List<TableMeta> getTableMeta(String catName, String dbNames, String tableNames,
+                                List<String> tableTypes) throws MetaException;
+ 
+   /**
+    * @param catName catalog name
+    * @param dbname
+    *        The name of the database from which to retrieve the tables
+    * @param tableNames
+    *        The names of the tables to retrieve.
+    * @return A list of the tables retrievable from the database
+    *          whose names are in the list tableNames.
+    *         If there are duplicate names, only one instance of the table will be returned
+    * @throws MetaException failure in querying the RDBMS.
+    */
+   List<Table> getTableObjectsByName(String catName, String dbname, List<String> tableNames)
+       throws MetaException, UnknownDBException;
+ 
+   /**
+    * Get all tables in a database.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @return list of table names
+    * @throws MetaException failure in querying the RDBMS.
+    */
+   List<String> getAllTables(String catName, String dbName) throws MetaException;
+ 
+   /**
+    * Gets a list of tables based on a filter string and filter type.
+    * @param catName catalog name
+    * @param dbName
+    *          The name of the database from which you will retrieve the table names
+    * @param filter
+    *          The filter string
+    * @param max_tables
+    *          The maximum number of tables returned
+    * @return  A list of table names that match the desired filter
+    * @throws MetaException failure in querying the RDBMS.
+    * @throws UnknownDBException no such database.
+    */
+   List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+                                       short max_tables) throws MetaException, UnknownDBException;
+ 
+   /**
+    * Get a partial or complete list of names for partitions of a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param max_parts maximum number of partitions to retrieve, -1 for all.
+    * @return list of partition names.
+    * @throws MetaException there was an error accessing the RDBMS
+    */
+   List<String> listPartitionNames(String catName, String db_name,
+       String tbl_name, short max_parts) throws MetaException;
+ 
+   /**
+    * Get a list of partition values as one big struct.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param cols partition key columns
+    * @param applyDistinct whether to apply distinct to the list
+    * @param filter filter to apply to the partition names
+    * @param ascending whether to put in ascending order
+    * @param order list of fields to order the results by
+    * @param maxParts maximum number of parts to return, or -1 for all
+    * @return struct with all of the partition value information
+    * @throws MetaException error access the RDBMS
+    */
+   PartitionValuesResponse listPartitionValues(String catName, String db_name, String tbl_name,
+                                               List<FieldSchema> cols, boolean applyDistinct, String filter, boolean ascending,
+                                               List<FieldSchema> order, long maxParts) throws MetaException;
+ 
+   /**
+    * Alter a partition.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals partition values that describe the partition.
+    * @param new_part new partition object.  This should be a complete copy of the old one with
+    *                 changed values, not just the parts to update.
+    * @throws InvalidObjectException No such partition.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   void alterPartition(String catName, String db_name, String tbl_name, List<String> part_vals,
 -      Partition new_part) throws InvalidObjectException, MetaException;
++      Partition new_part, long queryTxnId, String queryValidWriteIds)
++          throws InvalidObjectException, MetaException;
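
A sketch of the complete-copy contract; deepCopy() is the Thrift-generated copy method,
and the transaction arguments are assumed to come from the caller:

    Partition newPart = oldPart.deepCopy();
    newPart.getSd().setLocation(newLocation);
    store.alterPartition("hive", "sales", "orders",
        Arrays.asList("2018-07-13"), newPart, queryTxnId, queryValidWriteIds);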
+ 
+   /**
+    * Alter a set of partitions.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @param part_vals_list list of list of partition values.  Each outer list describes one
+    *                       partition (with its list of partition values).
+    * @param new_parts list of new partitions.  The order must match the old partitions described in
+    *                  part_vals_list.  Each of these should be a complete copy of the new
+    *                  partition, not just the pieces to update.
++   * @param writeId write id of the transaction for the table
++   * @param queryTxnId transaction id of the transaction that called this method.
++   * @param queryValidWriteIds valid write id list of the transaction on the current table
+    * @throws InvalidObjectException One of the indicated partitions does not exist.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   void alterPartitions(String catName, String db_name, String tbl_name,
 -      List<List<String>> part_vals_list, List<Partition> new_parts)
++      List<List<String>> part_vals_list, List<Partition> new_parts, long writeId,
++      long queryTxnId, String queryValidWriteIds)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get partitions with a filter.  This is a portion of the SQL where clause.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tblName table name
+    * @param filter SQL where clause filter
+    * @param maxParts maximum number of partitions to return, or -1 for all.
+    * @return list of partition objects matching the criteria
+    * @throws MetaException Error accessing the RDBMS or processing the filter.
+    * @throws NoSuchObjectException no such table.
+    */
+   List<Partition> getPartitionsByFilter(
+       String catName, String dbName, String tblName, String filter, short maxParts)
+       throws MetaException, NoSuchObjectException;
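
A sketch under the assumption that the table has a string partition column "ds" and
that the filter uses the metastore's SQL-like partition filter grammar:

    List<Partition> recent = store.getPartitionsByFilter(
        "hive", "sales", "orders", "ds >= '2018-07-01'", (short) -1);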
+ 
+   /**
+    * Get partitions using an already parsed expression.
+    * @param catName catalog name.
+    * @param dbName database name
+    * @param tblName table name
+    * @param expr an already parsed Hive expression
+    * @param defaultPartitionName default name of a partition
+    * @param maxParts maximum number of partitions to return, or -1 for all
+    * @param result list to place resulting partitions in
+    * @return true if the result contains unknown partitions.
+    * @throws TException error executing the expression
+    */
+   boolean getPartitionsByExpr(String catName, String dbName, String tblName,
+       byte[] expr, String defaultPartitionName, short maxParts, List<Partition> result)
+       throws TException;
+ 
+   /**
+    * Get the number of partitions that match a provided SQL filter.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param filter filter from Hive's SQL where clause
+    * @return number of matching partitions.
+    * @throws MetaException error accessing the RDBMS or executing the filter
+    * @throws NoSuchObjectException no such table
+    */
+   int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
+     throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get the number of partitions that match an already parsed expression.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param expr an already parsed Hive expression
+    * @return number of matching partitions.
+    * @throws MetaException error accessing the RDBMS or working with the expression.
+    * @throws NoSuchObjectException no such table.
+    */
+   int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get partitions by name.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partNames list of partition names.  These are names not values, so they will include
+    *                  both the key and the value.
+    * @return list of matching partitions
+    * @throws MetaException error accessing the RDBMS.
+    * @throws NoSuchObjectException No such table.
+    */
+   List<Partition> getPartitionsByNames(String catName, String dbName, String tblName,
+                                        List<String> partNames)
+       throws MetaException, NoSuchObjectException;
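
A sketch; partition names carry both keys and values in key1=val1[/key2=val2...] form,
as noted elsewhere in this interface:

    List<Partition> parts = store.getPartitionsByNames("hive", "sales", "orders",
        Arrays.asList("ds=2018-07-13/hr=00", "ds=2018-07-13/hr=01"));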
+ 
+   Table markPartitionForEvent(String catName, String dbName, String tblName, Map<String,String> partVals, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException;
+ 
+   boolean isPartitionMarkedForEvent(String catName, String dbName, String tblName, Map<String, String> partName, PartitionEventType evtType) throws MetaException, UnknownTableException, InvalidPartitionException, UnknownPartitionException;
+ 
+   boolean addRole(String roleName, String ownerName)
+       throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   boolean removeRole(String roleName) throws MetaException, NoSuchObjectException;
+ 
+   boolean grantRole(Role role, String userName, PrincipalType principalType,
+       String grantor, PrincipalType grantorType, boolean grantOption)
+       throws MetaException, NoSuchObjectException, InvalidObjectException;
+ 
+   boolean revokeRole(Role role, String userName, PrincipalType principalType,
+       boolean grantOption) throws MetaException, NoSuchObjectException;
+ 
+   PrincipalPrivilegeSet getUserPrivilegeSet(String userName,
+       List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a database for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated database
+    * @throws InvalidObjectException no such database
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getDBPrivilegeSet (String catName, String dbName, String userName,
+       List<String> groupNames)  throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a table for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated table
+    * @throws InvalidObjectException no such table
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getTablePrivilegeSet (String catName, String dbName, String tableName,
+       String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a partition for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partition partition name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated partition
+    * @throws InvalidObjectException no such partition
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getPartitionPrivilegeSet (String catName, String dbName, String tableName,
+       String partition, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Get privileges for a column in a table or partition for a user.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partitionName partition name, or null for table level column permissions
+    * @param columnName column name
+    * @param userName user name
+    * @param groupNames list of groups the user is in
+    * @return privileges for that user on indicated column in the table or partition
+    * @throws InvalidObjectException no such table, partition, or column
+    * @throws MetaException error accessing the RDBMS
+    */
+   PrincipalPrivilegeSet getColumnPrivilegeSet (String catName, String dbName, String tableName, String partitionName,
+       String columnName, String userName, List<String> groupNames) throws InvalidObjectException, MetaException;
+ 
+   List<HiveObjectPrivilege> listPrincipalGlobalGrants(String principalName,
+       PrincipalType principalType);
+ 
+   /**
+    * For a given principal name and type, list the DB Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @return list of privileges for that principal on the specified database.
+    */
+   List<HiveObjectPrivilege> listPrincipalDBGrants(String principalName,
+       PrincipalType principalType, String catName, String dbName);
+ 
+   /**
+    * For a given principal name and type, list the Table Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @return list of privileges for that principal on the specified table.
+    */
+   List<HiveObjectPrivilege> listAllTableGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName);
+ 
+   /**
+    * For a given principal name and type, list the Partition Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partValues partition values
+    * @param partName partition name (not value)
+    * @return list of privileges for that principal on the specified partition.
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName);
+ 
+   /**
+    * For a given principal name and type, list the Table Column Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param columnName column name
+    * @return list of privileges for that principal on the specified column.
+    */
+   List<HiveObjectPrivilege> listPrincipalTableColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, String columnName);
+ 
+   /**
+    * For a given principal name and type, list the Partition Column Grants
+    * @param principalName principal name
+    * @param principalType type
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partValues partition values
+    * @param partName partition name (not value)
+    * @param columnName column name
+    * @return list of privileges for that principal on the specified partition column.
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionColumnGrants(
+       String principalName, PrincipalType principalType, String catName, String dbName,
+       String tableName, List<String> partValues, String partName, String columnName);
+ 
+   boolean grantPrivileges (PrivilegeBag privileges)
+       throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   boolean revokePrivileges(PrivilegeBag privileges, boolean grantOption)
+   throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   boolean refreshPrivileges(HiveObjectRef objToRefresh, String authorizer, PrivilegeBag grantPrivileges)
+   throws InvalidObjectException, MetaException, NoSuchObjectException;
+ 
+   org.apache.hadoop.hive.metastore.api.Role getRole(
+       String roleName) throws NoSuchObjectException;
+ 
+   List<String> listRoleNames();
+ 
+   List<Role> listRoles(String principalName,
+       PrincipalType principalType);
+ 
+   List<RolePrincipalGrant> listRolesWithGrants(String principalName,
+                                                       PrincipalType principalType);
+ 
+ 
+   /**
+    * Get the role to principal grant mapping for given role
+    * @param roleName name of the role
+    * @return list of principal grants for that role
+    */
+   List<RolePrincipalGrant> listRoleMembers(String roleName);
+ 
+ 
+   /**
+    * Fetch a partition along with privilege information for a particular user.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partVals partition values
+    * @param user_name user to get privilege information for.
+    * @param group_names groups to get privilege information for.
+    * @return a partition
+    * @throws MetaException error accessing the RDBMS.
+    * @throws NoSuchObjectException no such partition exists
+    * @throws InvalidObjectException error fetching privilege information
+    */
+   Partition getPartitionWithAuth(String catName, String dbName, String tblName,
+       List<String> partVals, String user_name, List<String> group_names)
+       throws MetaException, NoSuchObjectException, InvalidObjectException;
+ 
+   /**
+    * Fetch some or all partitions for a table, along with privilege information for a particular
+    * user.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param maxParts maximum number of partitions to fetch, -1 for all partitions.
+    * @param userName user to get privilege information for.
+    * @param groupNames groups to get privilege information for.
+    * @return list of partitions.
+    * @throws MetaException error access the RDBMS.
+    * @throws NoSuchObjectException no such table exists
+    * @throws InvalidObjectException error fetching privilege information.
+    */
+   List<Partition> getPartitionsWithAuth(String catName, String dbName,
+       String tblName, short maxParts, String userName, List<String> groupNames)
+       throws MetaException, NoSuchObjectException, InvalidObjectException;
+ 
+   /**
+    * Lists partition names that match a given partial specification
+    * @param catName catalog name.
+    * @param db_name
+    *          The name of the database which has the partitions
+    * @param tbl_name
+    *          The name of the table which has the partitions
+    * @param part_vals
+    *          A partial list of values for partitions in order of the table's partition keys.
+    *          Entries can be empty if you only want to specify latter partitions.
+    * @param max_parts
+    *          The maximum number of partitions to return
+    * @return A list of partition names that match the partial spec.
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException No such table exists
+    */
+   List<String> listPartitionNamesPs(String catName, String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws MetaException, NoSuchObjectException;
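
A sketch of a partial spec for a table assumed to be partitioned by (ds, hr); an empty
entry matches any value for that key:

    // every ds value, restricted to hr=00
    List<String> names = store.listPartitionNamesPs(
        "hive", "sales", "orders", Arrays.asList("", "00"), (short) -1);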
+ 
+   /**
+    * Lists partitions that match a given partial specification and sets their auth privileges.
+    *   If userName and groupNames null, then no auth privileges are set.
+    * @param catName catalog name.
+    * @param db_name
+    *          The name of the database which has the partitions
+    * @param tbl_name
+    *          The name of the table which has the partitions
+    * @param part_vals
+    *          A partial list of values for partitions in order of the table's partition keys
+    *          Entries can be empty if you need to specify latter partitions.
+    * @param max_parts
+    *          The maximum number of partitions to return
+    * @param userName
+    *          The user name for the partition for authentication privileges
+    * @param groupNames
+    *          The groupNames for the partition for authentication privileges
+    * @return A list of partitions that match the partial spec.
+    * @throws MetaException error access RDBMS
+    * @throws NoSuchObjectException No such table exists
+    * @throws InvalidObjectException error access privilege information
+    */
+   List<Partition> listPartitionsPsWithAuth(String catName, String db_name, String tbl_name,
+       List<String> part_vals, short max_parts, String userName, List<String> groupNames)
+       throws MetaException, InvalidObjectException, NoSuchObjectException;
+ 
+   /** Persists the given column statistics object to the metastore
+    * @param colStats object to persist
+    * @return Boolean indicating the outcome of the operation
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws InvalidObjectException the stats object is invalid
+    * @throws InvalidInputException unable to record the stats for the table
+    */
+   boolean updateTableColumnStatistics(ColumnStatistics colStats)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   /** Persists the given column statistics object to the metastore
+    * @param statsObj object to persist
+    * @param partVals partition values to persist the stats for
+    * @return Boolean indicating the outcome of the operation
+    * @throws NoSuchObjectException No such table.
+    * @throws MetaException error accessing the RDBMS.
+    * @throws InvalidObjectException the stats object is invalid
+    * @throws InvalidInputException unable to record the stats for the table
+    */
+   boolean updatePartitionColumnStatistics(ColumnStatistics statsObj,
+      List<String> partVals)
+      throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Returns the relevant column statistics for a given column in a given table in a given database
+    * if such statistics exist.
+    * @param catName catalog name.
+    * @param dbName name of the database, defaults to current database
+    * @param tableName name of the table
+    * @param colName names of the columns for which statistics is requested
+    * @return Relevant column statistics for the column for the given table
+    * @throws NoSuchObjectException No such table
+    * @throws MetaException error accessing the RDBMS
+    *
+    */
+   ColumnStatistics getTableColumnStatistics(String catName, String dbName, String tableName,
+     List<String> colName) throws MetaException, NoSuchObjectException;
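
A sketch; the result covers only the requested columns for which statistics actually
exist:

    ColumnStatistics stats = store.getTableColumnStatistics(
        "hive", "sales", "orders", Arrays.asList("order_id", "amount"));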
+ 
+   /**
++   * Returns the relevant column statistics for a given column in a given table in a given database
++   * if such statistics exist.
++   * @param catName catalog name.
++   * @param dbName name of the database, defaults to current database
++   * @param tableName name of the table
++   * @param colName names of the columns for which statistics is requested
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return Relevant column statistics for the column for the given table
++   * @throws NoSuchObjectException No such table
++   * @throws MetaException error accessing the RDBMS
++   *
++   */
++  ColumnStatistics getTableColumnStatistics(
++    String catName, String dbName, String tableName,
++    List<String> colName, long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
++  /**
+    * Get statistics for a partition for a set of columns.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partNames list of partition names.  These are names so must be key1=val1[/key2=val2...]
+    * @param colNames list of columns to get stats for
+    * @return list of statistics objects
+    * @throws MetaException error accessing the RDBMS
+    * @throws NoSuchObjectException no such partition.
+    */
+   List<ColumnStatistics> getPartitionColumnStatistics(
+      String catName, String dbName, String tblName, List<String> partNames, List<String> colNames)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
++   * Get statistics for a partition for a set of columns.
++   * @param catName catalog name.
++   * @param dbName database name.
++   * @param tblName table name.
++   * @param partNames list of partition names.  These are names so must be key1=val1[/key2=val2...]
++   * @param colNames list of columns to get stats for
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return list of statistics objects
++   * @throws MetaException error accessing the RDBMS
++   * @throws NoSuchObjectException no such partition.
++   */
++  List<ColumnStatistics> getPartitionColumnStatistics(
++      String catName, String dbName, String tblName,
++      List<String> partNames, List<String> colNames,
++      long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
++  /**
+    * Deletes column statistics if present associated with a given db, table, partition and col. If
+    * null is passed instead of a colName, stats when present for all columns associated
+    * with a given db, table and partition are deleted.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tableName table name.
+    * @param partName partition name.
+    * @param partVals partition values.
+    * @param colName column name.
+    * @return Boolean indicating the outcome of the operation
+    * @throws NoSuchObjectException no such partition
+    * @throws MetaException error access the RDBMS
+    * @throws InvalidObjectException error dropping the stats
+    * @throws InvalidInputException bad input, such as null table or database name.
+    */
+   boolean deletePartitionColumnStatistics(String catName, String dbName, String tableName,
+       String partName, List<String> partVals, String colName)
+       throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Delete statistics for a single column or all columns in a table.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param colName column name.  Null to delete stats for all columns in the table.
+    * @return true if the statistics were deleted.
+    * @throws NoSuchObjectException no such table or column.
+    * @throws MetaException error access the RDBMS.
+    * @throws InvalidObjectException error dropping the stats
+    * @throws InvalidInputException bad inputs, such as null table name.
+    */
+   boolean deleteTableColumnStatistics(String catName, String dbName, String tableName,
+                                       String colName)
+     throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException;
+ 
+   long cleanupEvents();
+ 
+   boolean addToken(String tokenIdentifier, String delegationToken);
+ 
+   boolean removeToken(String tokenIdentifier);
+ 
+   String getToken(String tokenIdentifier);
+ 
+   List<String> getAllTokenIdentifiers();
+ 
+   int addMasterKey(String key) throws MetaException;
+ 
+   void updateMasterKey(Integer seqNo, String key)
+      throws NoSuchObjectException, MetaException;
+ 
+   boolean removeMasterKey(Integer keySeq);
+ 
+   String[] getMasterKeys();
+ 
+   void verifySchema() throws MetaException;
+ 
+   String getMetaStoreSchemaVersion() throws  MetaException;
+ 
+   void setMetaStoreSchemaVersion(String version, String comment) throws MetaException;
+ 
+   /**
+    * Drop a list of partitions.
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name
+    * @param partNames list of partition names.
+    * @throws MetaException error access RDBMS or storage.
+    * @throws NoSuchObjectException One or more of the partitions does not exist.
+    */
+   void dropPartitions(String catName, String dbName, String tblName, List<String> partNames)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * List all DB grants for a given principal.
+    * @param principalName principal name
+    * @param principalType type
+    * @return all DB grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalDBGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Table grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Table grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalTableGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Partition grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Partition grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Table column grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Table column grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalTableColumnGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   /**
+    * List all Partition column grants for a given principal
+    * @param principalName principal name
+    * @param principalType type
+    * @return all Partition column grants for this principal
+    */
+   List<HiveObjectPrivilege> listPrincipalPartitionColumnGrantsAll(
+       String principalName, PrincipalType principalType);
+ 
+   List<HiveObjectPrivilege> listGlobalGrantsAll();
+ 
+   /**
+    * Find all the privileges for a given database.
+    * @param catName catalog name
+    * @param dbName database name
+    * @return list of all privileges.
+    */
+   List<HiveObjectPrivilege> listDBGrantsAll(String catName, String dbName);
+ 
+   /**
+    * Find all of the privileges for a given column in a given partition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partitionName partition name (not value)
+    * @param columnName column name
+    * @return all privileges on this column in this partition
+    */
+   List<HiveObjectPrivilege> listPartitionColumnGrantsAll(
+       String catName, String dbName, String tableName, String partitionName, String columnName);
+ 
+   /**
+    * Find all of the privileges for a given table
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @return all privileges on this table
+    */
+   List<HiveObjectPrivilege> listTableGrantsAll(String catName, String dbName, String tableName);
+ 
+   /**
+    * Find all of the privileges for a given partition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param partitionName partition name (not value)
+    * @return all privileges on this partition
+    */
+   List<HiveObjectPrivilege> listPartitionGrantsAll(
+       String catName, String dbName, String tableName, String partitionName);
+ 
+   /**
+    * Find all of the privileges for a given column in a given table.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param columnName column name
+    * @return all privileges on this column in this table
+    */
+   List<HiveObjectPrivilege> listTableColumnGrantsAll(
+       String catName, String dbName, String tableName, String columnName);
+ 
+   /**
+    * Register a user-defined function based on the function specification passed in.
+    * @param func function to create
+    * @throws InvalidObjectException incorrectly specified function
+    * @throws MetaException error accessing the RDBMS
+    */
+   void createFunction(Function func)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Alter function based on new function specs.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param funcName function name
+    * @param newFunction new function specification
+    * @throws InvalidObjectException no such function, or incorrectly specified new function
+    * @throws MetaException incorrectly specified function
+    */
+   void alterFunction(String catName, String dbName, String funcName, Function newFunction)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Drop a function definition.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param funcName function name
+    * @throws MetaException incorrectly specified function
+    * @throws NoSuchObjectException no such function
+    * @throws InvalidObjectException not sure when this is thrown
+    * @throws InvalidInputException not sure when this is thrown
+    */
+   void dropFunction(String catName, String dbName, String funcName)
+       throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException;
+ 
+   /**
+    * Retrieve function by name.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param funcName function name
+    * @return the function
+    * @throws MetaException incorrectly specified function
+    */
+   Function getFunction(String catName, String dbName, String funcName) throws MetaException;
+ 
+   /**
+    * Retrieve all functions.
+    * @return all functions in a catalog
+    * @throws MetaException incorrectly specified function
+    */
+   List<Function> getAllFunctions(String catName) throws MetaException;
+ 
+   /**
+    * Retrieve list of function names based on name pattern.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param pattern pattern to match
+    * @return functions that match the pattern
+    * @throws MetaException incorrectly specified function
+    */
+   List<String> getFunctions(String catName, String dbName, String pattern) throws MetaException;
+ 
+   /**
+    * Get aggregated stats for a table or partition(s).
+    * @param catName catalog name.
+    * @param dbName database name.
+    * @param tblName table name.
+    * @param partNames list of partition names.  These are the names of the partitions, not
+    *                  values.
+    * @param colNames list of column names
+    * @return aggregated stats
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException no such table or partition
+    */
+   AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
+     List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException;
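
A sketch aggregating one column across two partitions; names follow the key=value form
used elsewhere in this interface:

    AggrStats agg = store.get_aggr_stats_for("hive", "sales", "orders",
        Arrays.asList("ds=2018-07-13/hr=00", "ds=2018-07-13/hr=01"),
        Arrays.asList("amount"));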
+ 
+   /**
++   * Get aggregated stats for a table or partition(s).
++   * @param catName catalog name.
++   * @param dbName database name.
++   * @param tblName table name.
++   * @param partNames list of partition names.  These are the names of the partitions, not
++   *                  values.
++   * @param colNames list of column names
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList string format of valid writeId transaction list
++   * @return aggregated stats
++   * @throws MetaException error accessing RDBMS
++   * @throws NoSuchObjectException no such table or partition
++   */
++  AggrStats get_aggr_stats_for(String catName, String dbName, String tblName,
++    List<String> partNames, List<String> colNames,
++    long txnId, String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
++  /**
+    * Get column stats for all partitions of all tables in the database
+    * @param catName catalog name
+    * @param dbName database name
+    * @return List of column stats objects for all partitions of all tables in the database
+    * @throws MetaException error accessing RDBMS
+    * @throws NoSuchObjectException no such database
+    */
+   List<ColStatsObjWithSourceInfo> getPartitionColStatsForDatabase(String catName, String dbName)
+       throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Get the next notification event.
+    * @param rqst Request containing information on the last processed notification.
+    * @return list of notifications, sorted by eventId
+    */
+   NotificationEventResponse getNextNotification(NotificationEventRequest rqst);
+ 
+ 
+   /**
+    * Add a notification entry.  This should only be called from inside the metastore
+    * @param event the notification to add
+    * @throws MetaException error accessing RDBMS
+    */
+   void addNotificationEvent(NotificationEvent event) throws MetaException;
+ 
+   /**
+    * Remove older notification events.
+    * @param olderThan Remove any events older than a given number of seconds
+    */
+   void cleanNotificationEvents(int olderThan);
+ 
+   /**
+    * Get the last issued notification event id.  This is intended for use by the export command
+    * so that users can determine the state of the system at the point of the export,
+    * and determine which notification events happened before or after the export.
+    * @return the id of the most recently issued notification event.
+    */
+   CurrentNotificationEventId getCurrentNotificationEventId();
+ 
+   /**
+    * Get the number of events corresponding to given database with fromEventId.
+    * This is intended for use by the repl commands to track the progress of incremental dump.
+    * @return the count of events matching the request.
+    */
+   NotificationEventsCountResponse getNotificationEventsCount(NotificationEventsCountRequest rqst);
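
A sketch of an incremental poll; the lastEvent constructor argument and the
setMaxEvents field of the Thrift-generated NotificationEventRequest bean are
assumptions based on the usual metastore notification API:

    long lastSeen = store.getCurrentNotificationEventId().getEventId();
    NotificationEventRequest rqst = new NotificationEventRequest(lastSeen);
    rqst.setMaxEvents(100);
    NotificationEventResponse resp = store.getNextNotification(rqst);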
+ 
+   /**
+    * Flush any catalog objects held by the metastore implementation.  Note that this does not
+    * flush statistics objects.  This should be called at the beginning of each query.
+    */
+   void flushCache();
+ 
+   /**
+    * @param fileIds List of file IDs from the filesystem.
+    * @return File metadata buffers from file metadata cache. The array is fileIds-sized, and
+    *         the entries (or nulls, if metadata is not in cache) correspond to fileIds in the list
+    */
+   ByteBuffer[] getFileMetadata(List<Long> fileIds) throws MetaException;
+ 
+   /**
+    * @param fileIds List of file IDs from the filesystem.
+    * @param metadata Metadata buffers corresponding to fileIds in the list.
+    * @param type The type; determines the class that can do additional processing for metadata.
+    */
+   void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata,
+       FileMetadataExprType type) throws MetaException;
+ 
+   /**
+    * @return Whether file metadata cache is supported by this implementation.
+    */
+   boolean isFileMetadataSupported();
+ 
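
A short sketch of the cache probe contract described above (positional alignment between fileIds and the returned array); `store` and the ids are hypothetical:

    import java.nio.ByteBuffer;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class FileMetadataCacheSketch {
      static void probe(RawStore store, List<Long> fileIds) throws MetaException {
        if (!store.isFileMetadataSupported()) {
          return; // this implementation keeps no file metadata cache
        }
        ByteBuffer[] buffers = store.getFileMetadata(fileIds);
        for (int i = 0; i < buffers.length; i++) {
          // Entries align positionally with fileIds; null marks a cache miss.
          System.out.println(fileIds.get(i) + (buffers[i] == null ? ": miss" : ": hit"));
        }
      }
    }
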
+   /**
+    * Gets file metadata from cache after applying a format-specific expression that can
+    * produce additional information based on file metadata and also filter the file list.
+    * @param fileIds List of file IDs from the filesystem.
+    * @param expr Format-specific serialized expression applicable to the files' metadata.
+    * @param type Expression type; used to determine the class that handles the metadata.
+    * @param metadatas Output parameter; fileIds-sized array to receive the metadatas
+    *                  for corresponding files, if any.
+    * @param exprResults Output parameter; fileIds-sized array to receive the format-specific
+    *                    expression results for the corresponding files.
+    * @param eliminated Output parameter; fileIds-sized array to receive the indication of whether
+    *                   the corresponding files are entirely eliminated by the expression.
+    */
+   void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+       ByteBuffer[] metadatas, ByteBuffer[] exprResults, boolean[] eliminated)
+           throws MetaException;
+ 
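
The output-parameter convention above can be sketched as follows; ORC_SARG is assumed to be a valid FileMetadataExprType constant, and the serialized expression is treated as opaque:

    import java.nio.ByteBuffer;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class MetadataByExprSketch {
      static void filter(RawStore store, List<Long> fileIds, byte[] expr)
          throws MetaException {
        int n = fileIds.size();
        // All three output arrays must be preallocated to fileIds size.
        ByteBuffer[] metadatas = new ByteBuffer[n];
        ByteBuffer[] exprResults = new ByteBuffer[n];
        boolean[] eliminated = new boolean[n];
        store.getFileMetadataByExpr(fileIds, FileMetadataExprType.ORC_SARG, expr,
            metadatas, exprResults, eliminated);
        for (int i = 0; i < n; i++) {
          if (eliminated[i]) {
            System.out.println(fileIds.get(i) + " eliminated by expression");
          }
        }
      }
    }
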
+   /** Gets file metadata handler for the corresponding type. */
+   FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type);
+ 
+   /**
+    * Gets total number of tables.
+    */
+   @InterfaceStability.Evolving
+   int getTableCount() throws MetaException;
+ 
+   /**
+    * Gets total number of partitions.
+    */
+   @InterfaceStability.Evolving
+   int getPartitionCount() throws MetaException;
+ 
+   /**
+    * Gets total number of databases.
+    */
+   @InterfaceStability.Evolving
+   int getDatabaseCount() throws MetaException;
+ 
+   /**
+    * Get the primary key associated with a table.  Strangely enough each SQLPrimaryKey is actually a
+    * column in the key, not the key itself.  Thus the list.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @return list of primary key columns or an empty list if the table does not have a primary key
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String tbl_name)
+       throws MetaException;
+ 
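
Since each SQLPrimaryKey describes one column of a possibly composite key, a caller iterates the list. A hedged sketch, with getters assumed to follow the Thrift field names (column_name, key_seq):

    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;

    public class PrimaryKeySketch {
      static void printKey(RawStore store) throws MetaException {
        List<SQLPrimaryKey> pkCols = store.getPrimaryKeys("hive", "default", "orders");
        for (SQLPrimaryKey col : pkCols) {
          // One entry per key column, ordered by its position in the key.
          System.out.println(col.getColumn_name() + " seq=" + col.getKey_seq());
        }
      }
    }
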
+   /**
+    * Get the foreign keys for a table.  All foreign keys for a particular table can be fetched by
+    * passing null for the last two arguments.
+    * @param catName catalog name.
+    * @param parent_db_name Database the table referred to is in.  This can be null to match all
+    *                       databases.
+    * @param parent_tbl_name Table that is referred to.  This can be null to match all tables.
+    * @param foreign_db_name Database the table with the foreign key is in.
+    * @param foreign_tbl_name Table with the foreign key.
+    * @return List of all matching foreign key columns.  Note that if more than one foreign key
+    * matches the arguments the results here will be all mixed together into a single list.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
+     String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+     throws MetaException;
+ 
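
A sketch of the null-wildcard matching described above; all table names are hypothetical:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.SQLForeignKey;

    public class ForeignKeySketch {
      static List<SQLForeignKey> fksDeclaredOn(RawStore store) throws MetaException {
        // Null parent db/table matches any referenced table, so this returns
        // every foreign key column declared on default.line_items.
        return store.getForeignKeys("hive", null, null, "default", "line_items");
      }
    }
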
+   /**
+    * Get unique constraints associated with a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @return list of unique constraints
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<SQLUniqueConstraint> getUniqueConstraints(String catName, String db_name,
+     String tbl_name) throws MetaException;
+ 
+   /**
+    * Get not null constraints on a table.
+    * @param catName catalog name.
+    * @param db_name database name.
+    * @param tbl_name table name.
+    * @return list of not null constraints
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<SQLNotNullConstraint> getNotNullConstraints(String catName, String db_name,
+     String tbl_name) throws MetaException;
+ 
+   /**
+    * Get default values for columns in a table.
+    * @param catName catalog name
+    * @param db_name database name
+    * @param tbl_name table name
+    * @return list of default values defined on the table.
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<SQLDefaultConstraint> getDefaultConstraints(String catName, String db_name,
+                                                    String tbl_name) throws MetaException;
+ 
+   /**
+    * Get check constraints for columns in a table.
+    * @param catName catalog name.
+    * @param db_name database name
+    * @param tbl_name table name
+    * @return check constraints for this table
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name,
+                                                    String tbl_name) throws MetaException;
+ 
+   /**
+    * Create a table with constraints
+    * @param tbl table definition
+    * @param primaryKeys primary key definition, or null
+    * @param foreignKeys foreign key definition, or null
+    * @param uniqueConstraints unique constraints definition, or null
+    * @param notNullConstraints not null constraints definition, or null
+    * @param defaultConstraints default values definition, or null
+    * @param checkConstraints check constraints definition, or null
+    * @return list of constraint names
+    * @throws InvalidObjectException one of the provided objects is malformed.
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<String> createTableWithConstraints(Table tbl, List<SQLPrimaryKey> primaryKeys,
+     List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints) throws InvalidObjectException, MetaException;
+ 
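
Constraint lists that do not apply can be passed as null, so a minimal call looks like the following sketch (the Table and primary key objects are assumed to be built elsewhere):

    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class CreateWithConstraintsSketch {
      static List<String> create(RawStore store, Table tbl, List<SQLPrimaryKey> pk)
          throws InvalidObjectException, MetaException {
        return store.createTableWithConstraints(tbl, pk,
            null /* foreignKeys */, null /* uniqueConstraints */,
            null /* notNullConstraints */, null /* defaultConstraints */,
            null /* checkConstraints */);
      }
    }
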
+   /**
+    * Drop a constraint, any constraint.  I have no idea why add and get each have separate
+    * methods for each constraint type but drop has only one.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param constraintName name of the constraint
+    * @throws NoSuchObjectException no constraint of this name exists
+    */
+   default void dropConstraint(String catName, String dbName, String tableName,
+                               String constraintName) throws NoSuchObjectException {
+     dropConstraint(catName, dbName, tableName, constraintName, false);
+   }
+ 
+   /**
+    * Drop a constraint, any constraint.  I have no idea why add and get each have separate
+    * methods for each constraint type but drop has only one.
+    * @param catName catalog name
+    * @param dbName database name
+    * @param tableName table name
+    * @param constraintName name of the constraint
+    * @param missingOk if true, it is not an error if there is no constraint of this name.  If
+    *                  false and there is no constraint of this name an exception will be thrown.
+    * @throws NoSuchObjectException no constraint of this name exists and missingOk = false
+    */
+   void dropConstraint(String catName, String dbName, String tableName, String constraintName,
+                       boolean missingOk) throws NoSuchObjectException;
+ 
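
The two overloads differ only in tolerance for a missing constraint, as this sketch shows (names hypothetical):

    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

    public class DropConstraintSketch {
      static void drop(RawStore store) throws NoSuchObjectException {
        // Strict form: throws NoSuchObjectException if pk_orders does not exist.
        store.dropConstraint("hive", "default", "orders", "pk_orders");
        // Lenient form: with missingOk=true a repeated drop is a no-op.
        store.dropConstraint("hive", "default", "orders", "pk_orders", true);
      }
    }
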
+   /**
+    * Add a primary key to a table.
+    * @param pks Columns in the primary key.
+    * @return the name of the constraint, as a list of strings.
+    * @throws InvalidObjectException The SQLPrimaryKeys list is malformed
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<String> addPrimaryKeys(List<SQLPrimaryKey> pks) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add a foreign key to a table.
+    * @param fks foreign key specification
+    * @return foreign key names.
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addForeignKeys(List<SQLForeignKey> fks) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add unique constraints to a table.
+    * @param uks unique constraints specification
+    * @return unique constraint names.
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addUniqueConstraints(List<SQLUniqueConstraint> uks) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add not null constraints to a table.
+    * @param nns not null constraint specifications
+    * @return constraint names.
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addNotNullConstraints(List<SQLNotNullConstraint> nns) throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add default values to a table definition
+    * @param dv list of default values
+    * @return constraint names
+    * @throws InvalidObjectException the specification is malformed.
+    * @throws MetaException error accessing the RDBMS.
+    */
+   List<String> addDefaultConstraints(List<SQLDefaultConstraint> dv)
+       throws InvalidObjectException, MetaException;
+ 
+   /**
+    * Add check constraints to a table
+    * @param cc check constraints to add
+    * @return list of constraint names
+    * @throws InvalidObjectException the specification is malformed
+    * @throws MetaException error accessing the RDBMS
+    */
+   List<String> addCheckConstraints(List<SQLCheckConstraint> cc) throws InvalidObjectException, MetaException;
+ 
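
A sketch of adding a not null constraint to an existing table. Setter names follow the Thrift fields shown elsewhere in this patch (catName, table_db, table_name, column_name); the remaining fields (constraint name, enable/validate/rely flags) are elided:

    import java.util.Collections;
    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;

    public class AddNotNullSketch {
      static List<String> addNotNull(RawStore store)
          throws InvalidObjectException, MetaException {
        SQLNotNullConstraint nn = new SQLNotNullConstraint();
        nn.setCatName("hive");
        nn.setTable_db("default");
        nn.setTable_name("orders");
        nn.setColumn_name("id");
        // Constraint name and enable/validate/rely flags omitted in this sketch.
        return store.addNotNullConstraints(Collections.singletonList(nn));
      }
    }
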
+   /**
+    * Gets the unique id of the backing datastore for the metadata
+    * @return unique id of the backing datastore
+    * @throws MetaException error accessing the RDBMS
+    */
+   String getMetastoreDbUuid() throws MetaException;
+ 
+   void createResourcePlan(WMResourcePlan resourcePlan, String copyFrom, int defaultPoolSize)
+       throws AlreadyExistsException, MetaException, InvalidObjectException, NoSuchObjectException;
+ 
+   WMFullResourcePlan getResourcePlan(String name) throws NoSuchObjectException, MetaException;
+ 
+   List<WMResourcePlan> getAllResourcePlans() throws MetaException;
+ 
+   WMFullResourcePlan alterResourcePlan(String name, WMNullableResourcePlan resourcePlan,
+       boolean canActivateDisabled, boolean canDeactivate, boolean isReplace)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   WMFullResourcePlan getActiveResourcePlan() throws MetaException;
+ 
+   WMValidateResourcePlanResponse validateResourcePlan(String name)
+       throws NoSuchObjectException, InvalidObjectException, MetaException;
+ 
+   void dropResourcePlan(String name) throws NoSuchObjectException, MetaException;
+ 
+   void createWMTrigger(WMTrigger trigger)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   void alterWMTrigger(WMTrigger trigger)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void dropWMTrigger(String resourcePlanName, String triggerName)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   List<WMTrigger> getTriggersForResourcePlan(String resourcePlanName)
+       throws NoSuchObjectException, MetaException;
+ 
+   void createPool(WMPool pool) throws AlreadyExistsException, NoSuchObjectException,
+       InvalidOperationException, MetaException;
+ 
+   void alterPool(WMNullablePool pool, String poolPath) throws AlreadyExistsException,
+       NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void dropWMPool(String resourcePlanName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void createOrUpdateWMMapping(WMMapping mapping, boolean update)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   void dropWMMapping(WMMapping mapping)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
+   void createWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
+       throws AlreadyExistsException, NoSuchObjectException, InvalidOperationException,
+           MetaException;
+ 
+   void dropWMTriggerToPoolMapping(String resourcePlanName, String triggerName, String poolPath)
+       throws NoSuchObjectException, InvalidOperationException, MetaException;
+ 
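
The workload management methods above compose into a plan/trigger/pool lifecycle roughly like this sketch; the no-arg constructors and setters on the WM structs are assumed from the usual Thrift codegen, and all names are hypothetical:

    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.WMPool;
    import org.apache.hadoop.hive.metastore.api.WMResourcePlan;
    import org.apache.hadoop.hive.metastore.api.WMTrigger;

    public class ResourcePlanSketch {
      // throws Exception stands in for the several metastore exceptions declared above
      static void buildPlan(RawStore store) throws Exception {
        WMResourcePlan plan = new WMResourcePlan();
        plan.setName("daily");
        store.createResourcePlan(plan, null /* copyFrom */, 4 /* defaultPoolSize */);

        WMPool pool = new WMPool();
        pool.setResourcePlanName("daily");
        pool.setPoolPath("etl");
        store.createPool(pool);

        WMTrigger trigger = new WMTrigger();
        trigger.setResourcePlanName("daily");
        trigger.setTriggerName("slow_query");
        store.createWMTrigger(trigger);

        store.createWMTriggerToPoolMapping("daily", "slow_query", "etl");
      }
    }
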
+   /**
+    * Create a new ISchema.
+    * @param schema schema to create
+    * @throws AlreadyExistsException there's already a schema with this name
+    * @throws MetaException general database exception
+    */
+   void createISchema(ISchema schema) throws AlreadyExistsException, MetaException,
+       NoSuchObjectException;
+ 
+   /**
+    * Alter an existing ISchema.  This assumes the caller has already checked that such a schema
+    * exists.
+    * @param schemaName name of the schema
+    * @param newSchema new schema object
+    * @throws NoSuchObjectException no schema with this name exists
+    * @throws MetaException general database exception
+    */
+   void alterISchema(ISchemaName schemaName, ISchema newSchema) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get an ISchema by name.
+    * @param schemaName schema descriptor
+    * @return ISchema
+    * @throws MetaException general database exception
+    */
+   ISchema getISchema(ISchemaName schemaName) throws MetaException;
+ 
+   /**
+    * Drop an ISchema.  This does not check whether there are valid versions of the schema in
+    * existence; it assumes the caller has already done that.
+    * @param schemaName schema descriptor
+    * @throws NoSuchObjectException no schema of this name exists
+    * @throws MetaException general database exception
+    */
+   void dropISchema(ISchemaName schemaName) throws NoSuchObjectException, MetaException;
+ 
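
A sketch of looking up a schema by its composite name; the ISchemaName setters are assumed from the Thrift definition (catName, dbName, schemaName), and the names are hypothetical:

    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.ISchema;
    import org.apache.hadoop.hive.metastore.api.ISchemaName;
    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class ISchemaLookupSketch {
      static ISchema lookup(RawStore store) throws MetaException {
        ISchemaName key = new ISchemaName();
        key.setCatName("hive");
        key.setDbName("default");
        key.setSchemaName("events_schema");
        return store.getISchema(key);
      }
    }
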
+   /**
+    * Create a new version of an existing schema.
+    * @param schemaVersion the schema version to add
+    * @throws AlreadyExistsException a version of the schema with the same version number already
+    * exists.
+    * @throws InvalidObjectException the passed in SchemaVersion object has problems.
+    * @throws NoSuchObjectException no schema with the passed in name exists.
+    * @throws MetaException general database exception
+    */
+   void addSchemaVersion(SchemaVersion schemaVersion)
+       throws AlreadyExistsException, InvalidObjectException, NoSuchObjectException, MetaException;
+ 
+   /**
+    * Alter a schema version.  Note that the Thrift interface only supports changing the serde
+    * mapping and states.  This method does not guarantee it will check any more than that.  This
+    * method does not understand the state transitions and just assumes that the new state it is
+    * passed is reasonable.
+    * @param version version descriptor for the schema
+    * @param newVersion altered SchemaVersion
+    * @throws NoSuchObjectException no such version of the named schema exists
+    * @throws MetaException general database exception
+    */
+   void alterSchemaVersion(SchemaVersionDescriptor version, SchemaVersion newVersion)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get a specific schema version.
+    * @param version version descriptor for the schema
+    * @return the SchemaVersion
+    * @throws MetaException general database exception
+    */
+   SchemaVersion getSchemaVersion(SchemaVersionDescriptor version) throws MetaException;
+ 
+   /**
+    * Get the latest version of a schema.
+    * @param schemaName name of the schema
+    * @return latest version of the schema
+    * @throws MetaException general database exception
+    */
+   SchemaVersion getLatestSchemaVersion(ISchemaName schemaName) throws MetaException;
+ 
+   /**
+    * Get all of the versions of a schema
+    * @param schemaName name of the schema
+    * @return all versions of the schema
+    * @throws MetaException general database exception
+    */
+   List<SchemaVersion> getAllSchemaVersion(ISchemaName schemaName) throws MetaException;
+ 
+   /**
+    * Find all SchemaVersion objects that match a query.  The query will select all SchemaVersions
+    * that are equal to all of the non-null passed in arguments.  That is, if arguments
+    * colName='name', colNamespace=null, type='string' are passed in, then all schemas that have
+    * a column with colName 'name' and type 'string' will be returned.
+    * @param colName column name.  Null is ok, which will cause this field to not be used in the
+    *                query.
+    * @param colNamespace column namespace.   Null is ok, which will cause this field to not be
+    *                     used in the query.
+    * @param type column type.   Null is ok, which will cause this field to not be used in the
+    *             query.
+    * @return List of all SchemaVersions that match.  Note that there is no expectation that these
+    * SchemaVersions derive from the same ISchema.  The list will be empty if there are no
+    * matching SchemaVersions.
+    * @throws MetaException general database exception
+    */
+   List<SchemaVersion> getSchemaVersionsByColumns(String colName, String colNamespace, String type)
+       throws MetaException;
+ 
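
Per the contract above, null arguments are simply skipped in the match; for example, finding every schema version with a string column named "name":

    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.SchemaVersion;

    public class SchemaSearchSketch {
      static void find(RawStore store) throws MetaException {
        // colNamespace is null, so it is not used in the query.
        List<SchemaVersion> versions =
            store.getSchemaVersionsByColumns("name", null, "string");
        System.out.println(versions.size() + " matching schema versions");
      }
    }
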
+   /**
+    * Drop a version of the schema.
+    * @param version version descriptor for the schema
+    * @throws NoSuchObjectException no such version of the named schema exists
+    * @throws MetaException general database exception
+    */
+   void dropSchemaVersion(SchemaVersionDescriptor version) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Get serde information
+    * @param serDeName name of the SerDe
+    * @return the SerDe, or null if there is no such serde
+    * @throws NoSuchObjectException no serde with this name exists
+    * @throws MetaException general database exception
+    */
+   SerDeInfo getSerDeInfo(String serDeName) throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Add a serde
+    * @param serde serde to add
+    * @throws AlreadyExistsException a serde of this name already exists
+    * @throws MetaException general database exception
+    */
+   void addSerde(SerDeInfo serde) throws AlreadyExistsException, MetaException;
+ 
+   /** Adds a RuntimeStat for persistence. */
+   void addRuntimeStat(RuntimeStat stat) throws MetaException;
+ 
+   /** Reads runtime statistic entries. */
+   List<RuntimeStat> getRuntimeStats(int maxEntries, int maxCreateTime) throws MetaException;
+ 
+   /** Removes outdated statistics. */
+   int deleteRuntimeStats(int maxRetainSecs) throws MetaException;
+ 
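
A sketch tying the three runtime statistics methods together; the bounds passed to getRuntimeStats and deleteRuntimeStats are hypothetical values:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.RawStore;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.RuntimeStat;

    public class RuntimeStatsSketch {
      static void rotate(RawStore store, RuntimeStat fresh) throws MetaException {
        store.addRuntimeStat(fresh);
        // Read back at most 100 entries, bounded by create time (semantics assumed).
        List<RuntimeStat> stats = store.getRuntimeStats(100, Integer.MAX_VALUE);
        // Remove anything retained longer than one day.
        int removed = store.deleteRuntimeStats(24 * 3600);
        System.out.println(stats.size() + " read, " + removed + " removed");
      }
    }
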
+   List<TableName> getTableNamesWithStats() throws MetaException, NoSuchObjectException;
+ 
+   List<TableName> getAllTableNamesForStats() throws MetaException, NoSuchObjectException;
+ 
+   Map<String, List<String>> getPartitionColsWithStats(String catName, String dbName,
+       String tableName) throws MetaException, NoSuchObjectException;
+ 
+   /**
+    * Remove older notification events.
+    * @param olderThan Remove any events older than the given number of seconds
+    */
+   void cleanWriteNotificationEvents(int olderThan);
+ 
+   /**
+    * Get all write events for a specific transaction.
+    * @param txnId get all the events done by this transaction
+    * @param dbName the name of db for which dump is being taken
+    * @param tableName the name of the table for which the dump is being taken
+    * @return list of write event info recorded by the transaction
+    */
+   List<WriteEventInfo> getAllWriteEventInfo(long txnId, String dbName, String tableName) throws MetaException;
+ }


[40/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
new file mode 100644
index 0000000..b9b5117
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterCatalogRequest.java
@@ -0,0 +1,504 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterCatalogRequest implements org.apache.thrift.TBase<AlterCatalogRequest, AlterCatalogRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AlterCatalogRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterCatalogRequest");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField NEW_CAT_FIELD_DESC = new org.apache.thrift.protocol.TField("newCat", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AlterCatalogRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AlterCatalogRequestTupleSchemeFactory());
+  }
+
+  private String name; // required
+  private Catalog newCat; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NAME((short)1, "name"),
+    NEW_CAT((short)2, "newCat");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NAME
+          return NAME;
+        case 2: // NEW_CAT
+          return NEW_CAT;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.NEW_CAT, new org.apache.thrift.meta_data.FieldMetaData("newCat", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Catalog.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterCatalogRequest.class, metaDataMap);
+  }
+
+  public AlterCatalogRequest() {
+  }
+
+  public AlterCatalogRequest(
+    String name,
+    Catalog newCat)
+  {
+    this();
+    this.name = name;
+    this.newCat = newCat;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AlterCatalogRequest(AlterCatalogRequest other) {
+    if (other.isSetName()) {
+      this.name = other.name;
+    }
+    if (other.isSetNewCat()) {
+      this.newCat = new Catalog(other.newCat);
+    }
+  }
+
+  public AlterCatalogRequest deepCopy() {
+    return new AlterCatalogRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.name = null;
+    this.newCat = null;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public Catalog getNewCat() {
+    return this.newCat;
+  }
+
+  public void setNewCat(Catalog newCat) {
+    this.newCat = newCat;
+  }
+
+  public void unsetNewCat() {
+    this.newCat = null;
+  }
+
+  /** Returns true if field newCat is set (has been assigned a value) and false otherwise */
+  public boolean isSetNewCat() {
+    return this.newCat != null;
+  }
+
+  public void setNewCatIsSet(boolean value) {
+    if (!value) {
+      this.newCat = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((String)value);
+      }
+      break;
+
+    case NEW_CAT:
+      if (value == null) {
+        unsetNewCat();
+      } else {
+        setNewCat((Catalog)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NAME:
+      return getName();
+
+    case NEW_CAT:
+      return getNewCat();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NAME:
+      return isSetName();
+    case NEW_CAT:
+      return isSetNewCat();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AlterCatalogRequest)
+      return this.equals((AlterCatalogRequest)that);
+    return false;
+  }
+
+  public boolean equals(AlterCatalogRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    boolean this_present_newCat = true && this.isSetNewCat();
+    boolean that_present_newCat = true && that.isSetNewCat();
+    if (this_present_newCat || that_present_newCat) {
+      if (!(this_present_newCat && that_present_newCat))
+        return false;
+      if (!this.newCat.equals(that.newCat))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    boolean present_newCat = true && (isSetNewCat());
+    list.add(present_newCat);
+    if (present_newCat)
+      list.add(newCat);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AlterCatalogRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNewCat()).compareTo(other.isSetNewCat());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNewCat()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.newCat, other.newCat);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AlterCatalogRequest(");
+    boolean first = true;
+
+    sb.append("name:");
+    if (this.name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("newCat:");
+    if (this.newCat == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.newCat);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (newCat != null) {
+      newCat.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AlterCatalogRequestStandardSchemeFactory implements SchemeFactory {
+    public AlterCatalogRequestStandardScheme getScheme() {
+      return new AlterCatalogRequestStandardScheme();
+    }
+  }
+
+  private static class AlterCatalogRequestStandardScheme extends StandardScheme<AlterCatalogRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AlterCatalogRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = iprot.readString();
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // NEW_CAT
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.newCat = new Catalog();
+              struct.newCat.read(iprot);
+              struct.setNewCatIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AlterCatalogRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        oprot.writeString(struct.name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.newCat != null) {
+        oprot.writeFieldBegin(NEW_CAT_FIELD_DESC);
+        struct.newCat.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AlterCatalogRequestTupleSchemeFactory implements SchemeFactory {
+    public AlterCatalogRequestTupleScheme getScheme() {
+      return new AlterCatalogRequestTupleScheme();
+    }
+  }
+
+  private static class AlterCatalogRequestTupleScheme extends TupleScheme<AlterCatalogRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AlterCatalogRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetNewCat()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+      if (struct.isSetNewCat()) {
+        struct.newCat.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AlterCatalogRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.name = iprot.readString();
+        struct.setNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.newCat = new Catalog();
+        struct.newCat.read(iprot);
+        struct.setNewCatIsSet(true);
+      }
+    }
+  }
+
+}
+
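
Since the generated bean above exposes write/read over any TProtocol (and Java serialization via TCompactProtocol), a round trip can be sketched with Thrift's TSerializer/TDeserializer. This assumes Catalog has no required fields, so validate() passes on an empty instance:

    import org.apache.hadoop.hive.metastore.api.AlterCatalogRequest;
    import org.apache.hadoop.hive.metastore.api.Catalog;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class ThriftRoundTripSketch {
      public static void main(String[] args) throws TException {
        AlterCatalogRequest rqst = new AlterCatalogRequest();
        rqst.setName("hive");
        rqst.setNewCat(new Catalog()); // a real request would populate the catalog
        byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(rqst);
        AlterCatalogRequest back = new AlterCatalogRequest();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(back, wire);
        System.out.println(back.equals(rqst)); // generated value-based equals
      }
    }
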

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterISchemaRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterISchemaRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterISchemaRequest.java
new file mode 100644
index 0000000..c079fab
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlterISchemaRequest.java
@@ -0,0 +1,509 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlterISchemaRequest implements org.apache.thrift.TBase<AlterISchemaRequest, AlterISchemaRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AlterISchemaRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlterISchemaRequest");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField NEW_SCHEMA_FIELD_DESC = new org.apache.thrift.protocol.TField("newSchema", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AlterISchemaRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AlterISchemaRequestTupleSchemeFactory());
+  }
+
+  private ISchemaName name; // required
+  private ISchema newSchema; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NAME((short)1, "name"),
+    NEW_SCHEMA((short)3, "newSchema");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NAME
+          return NAME;
+        case 3: // NEW_SCHEMA
+          return NEW_SCHEMA;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ISchemaName.class)));
+    tmpMap.put(_Fields.NEW_SCHEMA, new org.apache.thrift.meta_data.FieldMetaData("newSchema", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ISchema.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlterISchemaRequest.class, metaDataMap);
+  }
+
+  public AlterISchemaRequest() {
+  }
+
+  public AlterISchemaRequest(
+    ISchemaName name,
+    ISchema newSchema)
+  {
+    this();
+    this.name = name;
+    this.newSchema = newSchema;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AlterISchemaRequest(AlterISchemaRequest other) {
+    if (other.isSetName()) {
+      this.name = new ISchemaName(other.name);
+    }
+    if (other.isSetNewSchema()) {
+      this.newSchema = new ISchema(other.newSchema);
+    }
+  }
+
+  public AlterISchemaRequest deepCopy() {
+    return new AlterISchemaRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.name = null;
+    this.newSchema = null;
+  }
+
+  public ISchemaName getName() {
+    return this.name;
+  }
+
+  public void setName(ISchemaName name) {
+    this.name = name;
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public ISchema getNewSchema() {
+    return this.newSchema;
+  }
+
+  public void setNewSchema(ISchema newSchema) {
+    this.newSchema = newSchema;
+  }
+
+  public void unsetNewSchema() {
+    this.newSchema = null;
+  }
+
+  /** Returns true if field newSchema is set (has been assigned a value) and false otherwise */
+  public boolean isSetNewSchema() {
+    return this.newSchema != null;
+  }
+
+  public void setNewSchemaIsSet(boolean value) {
+    if (!value) {
+      this.newSchema = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((ISchemaName)value);
+      }
+      break;
+
+    case NEW_SCHEMA:
+      if (value == null) {
+        unsetNewSchema();
+      } else {
+        setNewSchema((ISchema)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NAME:
+      return getName();
+
+    case NEW_SCHEMA:
+      return getNewSchema();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NAME:
+      return isSetName();
+    case NEW_SCHEMA:
+      return isSetNewSchema();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AlterISchemaRequest)
+      return this.equals((AlterISchemaRequest)that);
+    return false;
+  }
+
+  public boolean equals(AlterISchemaRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    boolean this_present_newSchema = true && this.isSetNewSchema();
+    boolean that_present_newSchema = true && that.isSetNewSchema();
+    if (this_present_newSchema || that_present_newSchema) {
+      if (!(this_present_newSchema && that_present_newSchema))
+        return false;
+      if (!this.newSchema.equals(that.newSchema))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    boolean present_newSchema = true && (isSetNewSchema());
+    list.add(present_newSchema);
+    if (present_newSchema)
+      list.add(newSchema);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AlterISchemaRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNewSchema()).compareTo(other.isSetNewSchema());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNewSchema()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.newSchema, other.newSchema);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AlterISchemaRequest(");
+    boolean first = true;
+
+    sb.append("name:");
+    if (this.name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("newSchema:");
+    if (this.newSchema == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.newSchema);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (name != null) {
+      name.validate();
+    }
+    if (newSchema != null) {
+      newSchema.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AlterISchemaRequestStandardSchemeFactory implements SchemeFactory {
+    public AlterISchemaRequestStandardScheme getScheme() {
+      return new AlterISchemaRequestStandardScheme();
+    }
+  }
+
+  private static class AlterISchemaRequestStandardScheme extends StandardScheme<AlterISchemaRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AlterISchemaRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.name = new ISchemaName();
+              struct.name.read(iprot);
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // NEW_SCHEMA
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.newSchema = new ISchema();
+              struct.newSchema.read(iprot);
+              struct.setNewSchemaIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AlterISchemaRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        struct.name.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.newSchema != null) {
+        oprot.writeFieldBegin(NEW_SCHEMA_FIELD_DESC);
+        struct.newSchema.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AlterISchemaRequestTupleSchemeFactory implements SchemeFactory {
+    public AlterISchemaRequestTupleScheme getScheme() {
+      return new AlterISchemaRequestTupleScheme();
+    }
+  }
+
+  private static class AlterISchemaRequestTupleScheme extends TupleScheme<AlterISchemaRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AlterISchemaRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetNewSchema()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetName()) {
+        struct.name.write(oprot);
+      }
+      if (struct.isSetNewSchema()) {
+        struct.newSchema.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AlterISchemaRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.name = new ISchemaName();
+        struct.name.read(iprot);
+        struct.setNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.newSchema = new ISchema();
+        struct.newSchema.read(iprot);
+        struct.setNewSchemaIsSet(true);
+      }
+    }
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BasicTxnInfo.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BasicTxnInfo.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BasicTxnInfo.java
new file mode 100644
index 0000000..da37d03
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BasicTxnInfo.java
@@ -0,0 +1,907 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class BasicTxnInfo implements org.apache.thrift.TBase<BasicTxnInfo, BasicTxnInfo._Fields>, java.io.Serializable, Cloneable, Comparable<BasicTxnInfo> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BasicTxnInfo");
+
+  private static final org.apache.thrift.protocol.TField ISNULL_FIELD_DESC = new org.apache.thrift.protocol.TField("isnull", org.apache.thrift.protocol.TType.BOOL, (short)1);
+  private static final org.apache.thrift.protocol.TField TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("time", org.apache.thrift.protocol.TType.I64, (short)2);
+  private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField PARTITIONNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionname", org.apache.thrift.protocol.TType.STRING, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new BasicTxnInfoStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new BasicTxnInfoTupleSchemeFactory());
+  }
+
+  private boolean isnull; // required
+  private long time; // optional
+  private long txnid; // optional
+  private String dbname; // optional
+  private String tablename; // optional
+  private String partitionname; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    ISNULL((short)1, "isnull"),
+    TIME((short)2, "time"),
+    TXNID((short)3, "txnid"),
+    DBNAME((short)4, "dbname"),
+    TABLENAME((short)5, "tablename"),
+    PARTITIONNAME((short)6, "partitionname");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // ISNULL
+          return ISNULL;
+        case 2: // TIME
+          return TIME;
+        case 3: // TXNID
+          return TXNID;
+        case 4: // DBNAME
+          return DBNAME;
+        case 5: // TABLENAME
+          return TABLENAME;
+        case 6: // PARTITIONNAME
+          return PARTITIONNAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ISNULL_ISSET_ID = 0;
+  private static final int __TIME_ISSET_ID = 1;
+  private static final int __TXNID_ISSET_ID = 2;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.TIME,_Fields.TXNID,_Fields.DBNAME,_Fields.TABLENAME,_Fields.PARTITIONNAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.ISNULL, new org.apache.thrift.meta_data.FieldMetaData("isnull", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.TIME, new org.apache.thrift.meta_data.FieldMetaData("time", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.TXNID, new org.apache.thrift.meta_data.FieldMetaData("txnid", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLENAME, new org.apache.thrift.meta_data.FieldMetaData("tablename", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARTITIONNAME, new org.apache.thrift.meta_data.FieldMetaData("partitionname", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BasicTxnInfo.class, metaDataMap);
+  }
+
+  public BasicTxnInfo() {
+  }
+
+  public BasicTxnInfo(
+    boolean isnull)
+  {
+    this();
+    this.isnull = isnull;
+    setIsnullIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public BasicTxnInfo(BasicTxnInfo other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.isnull = other.isnull;
+    this.time = other.time;
+    this.txnid = other.txnid;
+    if (other.isSetDbname()) {
+      this.dbname = other.dbname;
+    }
+    if (other.isSetTablename()) {
+      this.tablename = other.tablename;
+    }
+    if (other.isSetPartitionname()) {
+      this.partitionname = other.partitionname;
+    }
+  }
+
+  public BasicTxnInfo deepCopy() {
+    return new BasicTxnInfo(this);
+  }
+
+  @Override
+  public void clear() {
+    setIsnullIsSet(false);
+    this.isnull = false;
+    setTimeIsSet(false);
+    this.time = 0;
+    setTxnidIsSet(false);
+    this.txnid = 0;
+    this.dbname = null;
+    this.tablename = null;
+    this.partitionname = null;
+  }
+
+  public boolean isIsnull() {
+    return this.isnull;
+  }
+
+  public void setIsnull(boolean isnull) {
+    this.isnull = isnull;
+    setIsnullIsSet(true);
+  }
+
+  public void unsetIsnull() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISNULL_ISSET_ID);
+  }
+
+  /** Returns true if field isnull is set (has been assigned a value) and false otherwise */
+  public boolean isSetIsnull() {
+    return EncodingUtils.testBit(__isset_bitfield, __ISNULL_ISSET_ID);
+  }
+
+  public void setIsnullIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISNULL_ISSET_ID, value);
+  }
+
+  public long getTime() {
+    return this.time;
+  }
+
+  public void setTime(long time) {
+    this.time = time;
+    setTimeIsSet(true);
+  }
+
+  public void unsetTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TIME_ISSET_ID);
+  }
+
+  /** Returns true if field time is set (has been assigned a value) and false otherwise */
+  public boolean isSetTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __TIME_ISSET_ID);
+  }
+
+  public void setTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TIME_ISSET_ID, value);
+  }
+
+  public long getTxnid() {
+    return this.txnid;
+  }
+
+  public void setTxnid(long txnid) {
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+  }
+
+  public void unsetTxnid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  /** Returns true if field txnid is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnid() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  public void setTxnidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+  }
+
+  public String getDbname() {
+    return this.dbname;
+  }
+
+  public void setDbname(String dbname) {
+    this.dbname = dbname;
+  }
+
+  public void unsetDbname() {
+    this.dbname = null;
+  }
+
+  /** Returns true if field dbname is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbname() {
+    return this.dbname != null;
+  }
+
+  public void setDbnameIsSet(boolean value) {
+    if (!value) {
+      this.dbname = null;
+    }
+  }
+
+  public String getTablename() {
+    return this.tablename;
+  }
+
+  public void setTablename(String tablename) {
+    this.tablename = tablename;
+  }
+
+  public void unsetTablename() {
+    this.tablename = null;
+  }
+
+  /** Returns true if field tablename is set (has been assigned a value) and false otherwise */
+  public boolean isSetTablename() {
+    return this.tablename != null;
+  }
+
+  public void setTablenameIsSet(boolean value) {
+    if (!value) {
+      this.tablename = null;
+    }
+  }
+
+  public String getPartitionname() {
+    return this.partitionname;
+  }
+
+  public void setPartitionname(String partitionname) {
+    this.partitionname = partitionname;
+  }
+
+  public void unsetPartitionname() {
+    this.partitionname = null;
+  }
+
+  /** Returns true if field partitionname is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitionname() {
+    return this.partitionname != null;
+  }
+
+  public void setPartitionnameIsSet(boolean value) {
+    if (!value) {
+      this.partitionname = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case ISNULL:
+      if (value == null) {
+        unsetIsnull();
+      } else {
+        setIsnull((Boolean)value);
+      }
+      break;
+
+    case TIME:
+      if (value == null) {
+        unsetTime();
+      } else {
+        setTime((Long)value);
+      }
+      break;
+
+    case TXNID:
+      if (value == null) {
+        unsetTxnid();
+      } else {
+        setTxnid((Long)value);
+      }
+      break;
+
+    case DBNAME:
+      if (value == null) {
+        unsetDbname();
+      } else {
+        setDbname((String)value);
+      }
+      break;
+
+    case TABLENAME:
+      if (value == null) {
+        unsetTablename();
+      } else {
+        setTablename((String)value);
+      }
+      break;
+
+    case PARTITIONNAME:
+      if (value == null) {
+        unsetPartitionname();
+      } else {
+        setPartitionname((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case ISNULL:
+      return isIsnull();
+
+    case TIME:
+      return getTime();
+
+    case TXNID:
+      return getTxnid();
+
+    case DBNAME:
+      return getDbname();
+
+    case TABLENAME:
+      return getTablename();
+
+    case PARTITIONNAME:
+      return getPartitionname();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case ISNULL:
+      return isSetIsnull();
+    case TIME:
+      return isSetTime();
+    case TXNID:
+      return isSetTxnid();
+    case DBNAME:
+      return isSetDbname();
+    case TABLENAME:
+      return isSetTablename();
+    case PARTITIONNAME:
+      return isSetPartitionname();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof BasicTxnInfo)
+      return this.equals((BasicTxnInfo)that);
+    return false;
+  }
+
+  public boolean equals(BasicTxnInfo that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_isnull = true;
+    boolean that_present_isnull = true;
+    if (this_present_isnull || that_present_isnull) {
+      if (!(this_present_isnull && that_present_isnull))
+        return false;
+      if (this.isnull != that.isnull)
+        return false;
+    }
+
+    boolean this_present_time = true && this.isSetTime();
+    boolean that_present_time = true && that.isSetTime();
+    if (this_present_time || that_present_time) {
+      if (!(this_present_time && that_present_time))
+        return false;
+      if (this.time != that.time)
+        return false;
+    }
+
+    boolean this_present_txnid = true && this.isSetTxnid();
+    boolean that_present_txnid = true && that.isSetTxnid();
+    if (this_present_txnid || that_present_txnid) {
+      if (!(this_present_txnid && that_present_txnid))
+        return false;
+      if (this.txnid != that.txnid)
+        return false;
+    }
+
+    boolean this_present_dbname = true && this.isSetDbname();
+    boolean that_present_dbname = true && that.isSetDbname();
+    if (this_present_dbname || that_present_dbname) {
+      if (!(this_present_dbname && that_present_dbname))
+        return false;
+      if (!this.dbname.equals(that.dbname))
+        return false;
+    }
+
+    boolean this_present_tablename = true && this.isSetTablename();
+    boolean that_present_tablename = true && that.isSetTablename();
+    if (this_present_tablename || that_present_tablename) {
+      if (!(this_present_tablename && that_present_tablename))
+        return false;
+      if (!this.tablename.equals(that.tablename))
+        return false;
+    }
+
+    boolean this_present_partitionname = true && this.isSetPartitionname();
+    boolean that_present_partitionname = true && that.isSetPartitionname();
+    if (this_present_partitionname || that_present_partitionname) {
+      if (!(this_present_partitionname && that_present_partitionname))
+        return false;
+      if (!this.partitionname.equals(that.partitionname))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_isnull = true;
+    list.add(present_isnull);
+    if (present_isnull)
+      list.add(isnull);
+
+    boolean present_time = true && (isSetTime());
+    list.add(present_time);
+    if (present_time)
+      list.add(time);
+
+    boolean present_txnid = true && (isSetTxnid());
+    list.add(present_txnid);
+    if (present_txnid)
+      list.add(txnid);
+
+    boolean present_dbname = true && (isSetDbname());
+    list.add(present_dbname);
+    if (present_dbname)
+      list.add(dbname);
+
+    boolean present_tablename = true && (isSetTablename());
+    list.add(present_tablename);
+    if (present_tablename)
+      list.add(tablename);
+
+    boolean present_partitionname = true && (isSetPartitionname());
+    list.add(present_partitionname);
+    if (present_partitionname)
+      list.add(partitionname);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(BasicTxnInfo other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetIsnull()).compareTo(other.isSetIsnull());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIsnull()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isnull, other.isnull);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTime()).compareTo(other.isSetTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.time, other.time);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTxnid()).compareTo(other.isSetTxnid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnid, other.txnid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, other.dbname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTablename()).compareTo(other.isSetTablename());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTablename()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablename, other.tablename);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartitionname()).compareTo(other.isSetPartitionname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitionname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionname, other.partitionname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("BasicTxnInfo(");
+    boolean first = true;
+
+    sb.append("isnull:");
+    sb.append(this.isnull);
+    first = false;
+    if (isSetTime()) {
+      if (!first) sb.append(", ");
+      sb.append("time:");
+      sb.append(this.time);
+      first = false;
+    }
+    if (isSetTxnid()) {
+      if (!first) sb.append(", ");
+      sb.append("txnid:");
+      sb.append(this.txnid);
+      first = false;
+    }
+    if (isSetDbname()) {
+      if (!first) sb.append(", ");
+      sb.append("dbname:");
+      if (this.dbname == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.dbname);
+      }
+      first = false;
+    }
+    if (isSetTablename()) {
+      if (!first) sb.append(", ");
+      sb.append("tablename:");
+      if (this.tablename == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.tablename);
+      }
+      first = false;
+    }
+    if (isSetPartitionname()) {
+      if (!first) sb.append(", ");
+      sb.append("partitionname:");
+      if (this.partitionname == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partitionname);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetIsnull()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'isnull' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class BasicTxnInfoStandardSchemeFactory implements SchemeFactory {
+    public BasicTxnInfoStandardScheme getScheme() {
+      return new BasicTxnInfoStandardScheme();
+    }
+  }
+
+  private static class BasicTxnInfoStandardScheme extends StandardScheme<BasicTxnInfo> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, BasicTxnInfo struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // ISNULL
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.isnull = iprot.readBool();
+              struct.setIsnullIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.time = iprot.readI64();
+              struct.setTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TXNID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txnid = iprot.readI64();
+              struct.setTxnidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // DBNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbname = iprot.readString();
+              struct.setDbnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // TABLENAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tablename = iprot.readString();
+              struct.setTablenameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // PARTITIONNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.partitionname = iprot.readString();
+              struct.setPartitionnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, BasicTxnInfo struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(ISNULL_FIELD_DESC);
+      oprot.writeBool(struct.isnull);
+      oprot.writeFieldEnd();
+      if (struct.isSetTime()) {
+        oprot.writeFieldBegin(TIME_FIELD_DESC);
+        oprot.writeI64(struct.time);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetTxnid()) {
+        oprot.writeFieldBegin(TXNID_FIELD_DESC);
+        oprot.writeI64(struct.txnid);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbname != null) {
+        if (struct.isSetDbname()) {
+          oprot.writeFieldBegin(DBNAME_FIELD_DESC);
+          oprot.writeString(struct.dbname);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.tablename != null) {
+        if (struct.isSetTablename()) {
+          oprot.writeFieldBegin(TABLENAME_FIELD_DESC);
+          oprot.writeString(struct.tablename);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.partitionname != null) {
+        if (struct.isSetPartitionname()) {
+          oprot.writeFieldBegin(PARTITIONNAME_FIELD_DESC);
+          oprot.writeString(struct.partitionname);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class BasicTxnInfoTupleSchemeFactory implements SchemeFactory {
+    public BasicTxnInfoTupleScheme getScheme() {
+      return new BasicTxnInfoTupleScheme();
+    }
+  }
+
+  private static class BasicTxnInfoTupleScheme extends TupleScheme<BasicTxnInfo> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, BasicTxnInfo struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeBool(struct.isnull);
+      BitSet optionals = new BitSet();
+      if (struct.isSetTime()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTxnid()) {
+        optionals.set(1);
+      }
+      if (struct.isSetDbname()) {
+        optionals.set(2);
+      }
+      if (struct.isSetTablename()) {
+        optionals.set(3);
+      }
+      if (struct.isSetPartitionname()) {
+        optionals.set(4);
+      }
+      oprot.writeBitSet(optionals, 5);
+      if (struct.isSetTime()) {
+        oprot.writeI64(struct.time);
+      }
+      if (struct.isSetTxnid()) {
+        oprot.writeI64(struct.txnid);
+      }
+      if (struct.isSetDbname()) {
+        oprot.writeString(struct.dbname);
+      }
+      if (struct.isSetTablename()) {
+        oprot.writeString(struct.tablename);
+      }
+      if (struct.isSetPartitionname()) {
+        oprot.writeString(struct.partitionname);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, BasicTxnInfo struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.isnull = iprot.readBool();
+      struct.setIsnullIsSet(true);
+      BitSet incoming = iprot.readBitSet(5);
+      if (incoming.get(0)) {
+        struct.time = iprot.readI64();
+        struct.setTimeIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.txnid = iprot.readI64();
+        struct.setTxnidIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.dbname = iprot.readString();
+        struct.setDbnameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.tablename = iprot.readString();
+        struct.setTablenameIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.partitionname = iprot.readString();
+        struct.setPartitionnameIsSet(true);
+      }
+    }
+  }
+
+}
+
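
BasicTxnInfo also illustrates how optional primitive fields are tracked: a Java
long cannot be null, so presence of time and txnid is kept as one bit each in
__isset_bitfield via org.apache.thrift.EncodingUtils, while the optional String
fields simply use null. A self-contained sketch of the bitfield bookkeeping
(the helper methods mirror what EncodingUtils does; the class name is
illustrative):

    // One presence bit per primitive field, packed into a byte.
    public class IssetBitfieldSketch {
      private static final int TIME_ISSET_ID = 0;
      private static final int TXNID_ISSET_ID = 1;
      private byte issetBitfield = 0;

      void setBit(int bit, boolean value) {
        if (value) issetBitfield |= (1 << bit);   // set:   setTimeIsSet(true)
        else issetBitfield &= ~(1 << bit);        // clear: unsetTime()
      }

      boolean testBit(int bit) {                  // query: isSetTime()
        return (issetBitfield & (1 << bit)) != 0;
      }

      public static void main(String[] args) {
        IssetBitfieldSketch s = new IssetBitfieldSketch();
        s.setBit(TIME_ISSET_ID, true);
        System.out.println(s.testBit(TIME_ISSET_ID));  // true
        System.out.println(s.testBit(TXNID_ISSET_ID)); // false
        s.setBit(TIME_ISSET_ID, false);
        System.out.println(s.testBit(TIME_ISSET_ID));  // false
      }
    }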

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java
new file mode 100644
index 0000000..9848bea
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BinaryColumnStatsData.java
@@ -0,0 +1,696 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class BinaryColumnStatsData implements org.apache.thrift.TBase<BinaryColumnStatsData, BinaryColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<BinaryColumnStatsData> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BinaryColumnStatsData");
+
+  private static final org.apache.thrift.protocol.TField MAX_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField("maxColLen", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField AVG_COL_LEN_FIELD_DESC = new org.apache.thrift.protocol.TField("avgColLen", org.apache.thrift.protocol.TType.DOUBLE, (short)2);
+  private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField BIT_VECTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("bitVectors", org.apache.thrift.protocol.TType.STRING, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new BinaryColumnStatsDataStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new BinaryColumnStatsDataTupleSchemeFactory());
+  }
+
+  private long maxColLen; // required
+  private double avgColLen; // required
+  private long numNulls; // required
+  private ByteBuffer bitVectors; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MAX_COL_LEN((short)1, "maxColLen"),
+    AVG_COL_LEN((short)2, "avgColLen"),
+    NUM_NULLS((short)3, "numNulls"),
+    BIT_VECTORS((short)4, "bitVectors");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MAX_COL_LEN
+          return MAX_COL_LEN;
+        case 2: // AVG_COL_LEN
+          return AVG_COL_LEN;
+        case 3: // NUM_NULLS
+          return NUM_NULLS;
+        case 4: // BIT_VECTORS
+          return BIT_VECTORS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __MAXCOLLEN_ISSET_ID = 0;
+  private static final int __AVGCOLLEN_ISSET_ID = 1;
+  private static final int __NUMNULLS_ISSET_ID = 2;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.BIT_VECTORS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MAX_COL_LEN, new org.apache.thrift.meta_data.FieldMetaData("maxColLen", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.AVG_COL_LEN, new org.apache.thrift.meta_data.FieldMetaData("avgColLen", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.BIT_VECTORS, new org.apache.thrift.meta_data.FieldMetaData("bitVectors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING, true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BinaryColumnStatsData.class, metaDataMap);
+  }
+
+  public BinaryColumnStatsData() {
+  }
+
+  public BinaryColumnStatsData(
+    long maxColLen,
+    double avgColLen,
+    long numNulls)
+  {
+    this();
+    this.maxColLen = maxColLen;
+    setMaxColLenIsSet(true);
+    this.avgColLen = avgColLen;
+    setAvgColLenIsSet(true);
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public BinaryColumnStatsData(BinaryColumnStatsData other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.maxColLen = other.maxColLen;
+    this.avgColLen = other.avgColLen;
+    this.numNulls = other.numNulls;
+    if (other.isSetBitVectors()) {
+      this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(other.bitVectors);
+    }
+  }
+
+  public BinaryColumnStatsData deepCopy() {
+    return new BinaryColumnStatsData(this);
+  }
+
+  @Override
+  public void clear() {
+    setMaxColLenIsSet(false);
+    this.maxColLen = 0;
+    setAvgColLenIsSet(false);
+    this.avgColLen = 0.0;
+    setNumNullsIsSet(false);
+    this.numNulls = 0;
+    this.bitVectors = null;
+  }
+
+  public long getMaxColLen() {
+    return this.maxColLen;
+  }
+
+  public void setMaxColLen(long maxColLen) {
+    this.maxColLen = maxColLen;
+    setMaxColLenIsSet(true);
+  }
+
+  public void unsetMaxColLen() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID);
+  }
+
+  /** Returns true if field maxColLen is set (has been assigned a value) and false otherwise */
+  public boolean isSetMaxColLen() {
+    return EncodingUtils.testBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID);
+  }
+
+  public void setMaxColLenIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXCOLLEN_ISSET_ID, value);
+  }
+
+  public double getAvgColLen() {
+    return this.avgColLen;
+  }
+
+  public void setAvgColLen(double avgColLen) {
+    this.avgColLen = avgColLen;
+    setAvgColLenIsSet(true);
+  }
+
+  public void unsetAvgColLen() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID);
+  }
+
+  /** Returns true if field avgColLen is set (has been assigned a value) and false otherwise */
+  public boolean isSetAvgColLen() {
+    return EncodingUtils.testBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID);
+  }
+
+  public void setAvgColLenIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __AVGCOLLEN_ISSET_ID, value);
+  }
+
+  public long getNumNulls() {
+    return this.numNulls;
+  }
+
+  public void setNumNulls(long numNulls) {
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+  }
+
+  public void unsetNumNulls() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumNulls() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  public void setNumNullsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value);
+  }
+
+  public byte[] getBitVectors() {
+    setBitVectors(org.apache.thrift.TBaseHelper.rightSize(bitVectors));
+    return bitVectors == null ? null : bitVectors.array();
+  }
+
+  public ByteBuffer bufferForBitVectors() {
+    return org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void setBitVectors(byte[] bitVectors) {
+    this.bitVectors = bitVectors == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(bitVectors, bitVectors.length));
+  }
+
+  public void setBitVectors(ByteBuffer bitVectors) {
+    this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void unsetBitVectors() {
+    this.bitVectors = null;
+  }
+
+  /** Returns true if field bitVectors is set (has been assigned a value) and false otherwise */
+  public boolean isSetBitVectors() {
+    return this.bitVectors != null;
+  }
+
+  public void setBitVectorsIsSet(boolean value) {
+    if (!value) {
+      this.bitVectors = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MAX_COL_LEN:
+      if (value == null) {
+        unsetMaxColLen();
+      } else {
+        setMaxColLen((Long)value);
+      }
+      break;
+
+    case AVG_COL_LEN:
+      if (value == null) {
+        unsetAvgColLen();
+      } else {
+        setAvgColLen((Double)value);
+      }
+      break;
+
+    case NUM_NULLS:
+      if (value == null) {
+        unsetNumNulls();
+      } else {
+        setNumNulls((Long)value);
+      }
+      break;
+
+    case BIT_VECTORS:
+      if (value == null) {
+        unsetBitVectors();
+      } else {
+        setBitVectors((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MAX_COL_LEN:
+      return getMaxColLen();
+
+    case AVG_COL_LEN:
+      return getAvgColLen();
+
+    case NUM_NULLS:
+      return getNumNulls();
+
+    case BIT_VECTORS:
+      return getBitVectors();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MAX_COL_LEN:
+      return isSetMaxColLen();
+    case AVG_COL_LEN:
+      return isSetAvgColLen();
+    case NUM_NULLS:
+      return isSetNumNulls();
+    case BIT_VECTORS:
+      return isSetBitVectors();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof BinaryColumnStatsData)
+      return this.equals((BinaryColumnStatsData)that);
+    return false;
+  }
+
+  public boolean equals(BinaryColumnStatsData that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_maxColLen = true;
+    boolean that_present_maxColLen = true;
+    if (this_present_maxColLen || that_present_maxColLen) {
+      if (!(this_present_maxColLen && that_present_maxColLen))
+        return false;
+      if (this.maxColLen != that.maxColLen)
+        return false;
+    }
+
+    boolean this_present_avgColLen = true;
+    boolean that_present_avgColLen = true;
+    if (this_present_avgColLen || that_present_avgColLen) {
+      if (!(this_present_avgColLen && that_present_avgColLen))
+        return false;
+      if (this.avgColLen != that.avgColLen)
+        return false;
+    }
+
+    boolean this_present_numNulls = true;
+    boolean that_present_numNulls = true;
+    if (this_present_numNulls || that_present_numNulls) {
+      if (!(this_present_numNulls && that_present_numNulls))
+        return false;
+      if (this.numNulls != that.numNulls)
+        return false;
+    }
+
+    boolean this_present_bitVectors = true && this.isSetBitVectors();
+    boolean that_present_bitVectors = true && that.isSetBitVectors();
+    if (this_present_bitVectors || that_present_bitVectors) {
+      if (!(this_present_bitVectors && that_present_bitVectors))
+        return false;
+      if (!this.bitVectors.equals(that.bitVectors))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_maxColLen = true;
+    list.add(present_maxColLen);
+    if (present_maxColLen)
+      list.add(maxColLen);
+
+    boolean present_avgColLen = true;
+    list.add(present_avgColLen);
+    if (present_avgColLen)
+      list.add(avgColLen);
+
+    boolean present_numNulls = true;
+    list.add(present_numNulls);
+    if (present_numNulls)
+      list.add(numNulls);
+
+    boolean present_bitVectors = true && (isSetBitVectors());
+    list.add(present_bitVectors);
+    if (present_bitVectors)
+      list.add(bitVectors);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(BinaryColumnStatsData other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMaxColLen()).compareTo(other.isSetMaxColLen());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMaxColLen()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxColLen, other.maxColLen);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAvgColLen()).compareTo(other.isSetAvgColLen());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAvgColLen()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.avgColLen, other.avgColLen);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(other.isSetNumNulls());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumNulls()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, other.numNulls);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetBitVectors()).compareTo(other.isSetBitVectors());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetBitVectors()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bitVectors, other.bitVectors);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("BinaryColumnStatsData(");
+    boolean first = true;
+
+    sb.append("maxColLen:");
+    sb.append(this.maxColLen);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("avgColLen:");
+    sb.append(this.avgColLen);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("numNulls:");
+    sb.append(this.numNulls);
+    first = false;
+    if (isSetBitVectors()) {
+      if (!first) sb.append(", ");
+      sb.append("bitVectors:");
+      if (this.bitVectors == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.bitVectors, sb);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetMaxColLen()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'maxColLen' is unset! Struct:" + toString());
+    }
+
+    if (!isSetAvgColLen()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'avgColLen' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNumNulls()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class BinaryColumnStatsDataStandardSchemeFactory implements SchemeFactory {
+    public BinaryColumnStatsDataStandardScheme getScheme() {
+      return new BinaryColumnStatsDataStandardScheme();
+    }
+  }
+
+  private static class BinaryColumnStatsDataStandardScheme extends StandardScheme<BinaryColumnStatsData> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, BinaryColumnStatsData struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MAX_COL_LEN
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.maxColLen = iprot.readI64();
+              struct.setMaxColLenIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // AVG_COL_LEN
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.avgColLen = iprot.readDouble();
+              struct.setAvgColLenIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // NUM_NULLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numNulls = iprot.readI64();
+              struct.setNumNullsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // BIT_VECTORS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.bitVectors = iprot.readBinary();
+              struct.setBitVectorsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, BinaryColumnStatsData struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(MAX_COL_LEN_FIELD_DESC);
+      oprot.writeI64(struct.maxColLen);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(AVG_COL_LEN_FIELD_DESC);
+      oprot.writeDouble(struct.avgColLen);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC);
+      oprot.writeI64(struct.numNulls);
+      oprot.writeFieldEnd();
+      if (struct.bitVectors != null) {
+        if (struct.isSetBitVectors()) {
+          oprot.writeFieldBegin(BIT_VECTORS_FIELD_DESC);
+          oprot.writeBinary(struct.bitVectors);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class BinaryColumnStatsDataTupleSchemeFactory implements SchemeFactory {
+    public BinaryColumnStatsDataTupleScheme getScheme() {
+      return new BinaryColumnStatsDataTupleScheme();
+    }
+  }
+
+  private static class BinaryColumnStatsDataTupleScheme extends TupleScheme<BinaryColumnStatsData> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, BinaryColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.maxColLen);
+      oprot.writeDouble(struct.avgColLen);
+      oprot.writeI64(struct.numNulls);
+      BitSet optionals = new BitSet();
+      if (struct.isSetBitVectors()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetBitVectors()) {
+        oprot.writeBinary(struct.bitVectors);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, BinaryColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.maxColLen = iprot.readI64();
+      struct.setMaxColLenIsSet(true);
+      struct.avgColLen = iprot.readDouble();
+      struct.setAvgColLenIsSet(true);
+      struct.numNulls = iprot.readI64();
+      struct.setNumNullsIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.bitVectors = iprot.readBinary();
+        struct.setBitVectorsIsSet(true);
+      }
+    }
+  }
+
+}
+
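
One more convention worth noting from BinaryColumnStatsData: binary fields are
defensively copied. setBitVectors(byte[]) wraps a private copy of the caller's
array, so mutating that array afterwards cannot change what the struct
serializes. A short sketch of that behavior (class name illustrative):

    import java.nio.ByteBuffer;
    import java.util.Arrays;

    public class BinaryFieldCopySketch {
      private ByteBuffer bitVectors;

      // Never alias the caller's array; wrap a private copy instead.
      public void setBitVectors(byte[] bytes) {
        this.bitVectors = bytes == null
            ? null
            : ByteBuffer.wrap(Arrays.copyOf(bytes, bytes.length));
      }

      public static void main(String[] args) {
        BinaryFieldCopySketch s = new BinaryFieldCopySketch();
        byte[] data = {1, 2, 3};
        s.setBitVectors(data);
        data[0] = 99; // mutate after handing the array over
        System.out.println(s.bitVectors.get(0)); // prints 1: the copy is intact
      }
    }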

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FunctionType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FunctionType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FunctionType.java
new file mode 100644
index 0000000..1116f88
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FunctionType.java
@@ -0,0 +1,42 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum FunctionType implements org.apache.thrift.TEnum {
+  JAVA(1);
+
+  private final int value;
+
+  private FunctionType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static FunctionType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return JAVA;
+      default:
+        return null;
+    }
+  }
+}
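
FunctionType shows the TEnum convention: each constant carries the integer
assigned in the Thrift IDL (the wire format transmits only that integer), and
findByValue returns null for unknown values, so a reader built against an older
schema can tolerate constants added later. A self-contained sketch; the lookup
loop here is illustrative, whereas the generated code uses an explicit switch:

    public enum FunctionTypeSketch {
      JAVA(1);

      private final int value;

      FunctionTypeSketch(int value) {
        this.value = value;
      }

      public int getValue() {
        return value;
      }

      // Unknown wire value -> null; the caller decides to skip or fail.
      public static FunctionTypeSketch findByValue(int value) {
        for (FunctionTypeSketch t : values()) {
          if (t.value == value) return t;
        }
        return null;
      }
    }

For example, FunctionTypeSketch.findByValue(2) returns null, matching the
behavior of the generated FunctionType.findByValue for any id other than 1.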

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
new file mode 100644
index 0000000..f68afe8
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java
@@ -0,0 +1,447 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetAllFunctionsResponse implements org.apache.thrift.TBase<GetAllFunctionsResponse, GetAllFunctionsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetAllFunctionsResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetAllFunctionsResponse");
+
+  private static final org.apache.thrift.protocol.TField FUNCTIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("functions", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetAllFunctionsResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetAllFunctionsResponseTupleSchemeFactory());
+  }
+
+  private List<Function> functions; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FUNCTIONS((short)1, "functions");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FUNCTIONS
+          return FUNCTIONS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.FUNCTIONS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FUNCTIONS, new org.apache.thrift.meta_data.FieldMetaData("functions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Function.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetAllFunctionsResponse.class, metaDataMap);
+  }
+
+  public GetAllFunctionsResponse() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetAllFunctionsResponse(GetAllFunctionsResponse other) {
+    if (other.isSetFunctions()) {
+      List<Function> __this__functions = new ArrayList<Function>(other.functions.size());
+      for (Function other_element : other.functions) {
+        __this__functions.add(new Function(other_element));
+      }
+      this.functions = __this__functions;
+    }
+  }
+
+  public GetAllFunctionsResponse deepCopy() {
+    return new GetAllFunctionsResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.functions = null;
+  }
+
+  public int getFunctionsSize() {
+    return (this.functions == null) ? 0 : this.functions.size();
+  }
+
+  public java.util.Iterator<Function> getFunctionsIterator() {
+    return (this.functions == null) ? null : this.functions.iterator();
+  }
+
+  public void addToFunctions(Function elem) {
+    if (this.functions == null) {
+      this.functions = new ArrayList<Function>();
+    }
+    this.functions.add(elem);
+  }
+
+  public List<Function> getFunctions() {
+    return this.functions;
+  }
+
+  public void setFunctions(List<Function> functions) {
+    this.functions = functions;
+  }
+
+  public void unsetFunctions() {
+    this.functions = null;
+  }
+
+  /** Returns true if field functions is set (has been assigned a value) and false otherwise */
+  public boolean isSetFunctions() {
+    return this.functions != null;
+  }
+
+  public void setFunctionsIsSet(boolean value) {
+    if (!value) {
+      this.functions = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FUNCTIONS:
+      if (value == null) {
+        unsetFunctions();
+      } else {
+        setFunctions((List<Function>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FUNCTIONS:
+      return getFunctions();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FUNCTIONS:
+      return isSetFunctions();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetAllFunctionsResponse)
+      return this.equals((GetAllFunctionsResponse)that);
+    return false;
+  }
+
+  public boolean equals(GetAllFunctionsResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_functions = true && this.isSetFunctions();
+    boolean that_present_functions = true && that.isSetFunctions();
+    if (this_present_functions || that_present_functions) {
+      if (!(this_present_functions && that_present_functions))
+        return false;
+      if (!this.functions.equals(that.functions))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_functions = true && (isSetFunctions());
+    list.add(present_functions);
+    if (present_functions)
+      list.add(functions);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetAllFunctionsResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetFunctions()).compareTo(other.isSetFunctions());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFunctions()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.functions, other.functions);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetAllFunctionsResponse(");
+    boolean first = true;
+
+    if (isSetFunctions()) {
+      sb.append("functions:");
+      if (this.functions == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.functions);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetAllFunctionsResponseStandardSchemeFactory implements SchemeFactory {
+    public GetAllFunctionsResponseStandardScheme getScheme() {
+      return new GetAllFunctionsResponseStandardScheme();
+    }
+  }
+
+  private static class GetAllFunctionsResponseStandardScheme extends StandardScheme<GetAllFunctionsResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FUNCTIONS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list832 = iprot.readListBegin();
+                struct.functions = new ArrayList<Function>(_list832.size);
+                Function _elem833;
+                for (int _i834 = 0; _i834 < _list832.size; ++_i834)
+                {
+                  _elem833 = new Function();
+                  _elem833.read(iprot);
+                  struct.functions.add(_elem833);
+                }
+                iprot.readListEnd();
+              }
+              struct.setFunctionsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.functions != null) {
+        if (struct.isSetFunctions()) {
+          oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size()));
+            for (Function _iter835 : struct.functions)
+            {
+              _iter835.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetAllFunctionsResponseTupleSchemeFactory implements SchemeFactory {
+    public GetAllFunctionsResponseTupleScheme getScheme() {
+      return new GetAllFunctionsResponseTupleScheme();
+    }
+  }
+
+  private static class GetAllFunctionsResponseTupleScheme extends TupleScheme<GetAllFunctionsResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetFunctions()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetFunctions()) {
+        {
+          oprot.writeI32(struct.functions.size());
+          for (Function _iter836 : struct.functions)
+          {
+            _iter836.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list837 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.functions = new ArrayList<Function>(_list837.size);
+          Function _elem838;
+          for (int _i839 = 0; _i839 < _list837.size; ++_i839)
+          {
+            _elem838 = new Function();
+            _elem838.read(iprot);
+            struct.functions.add(_elem838);
+          }
+        }
+        struct.setFunctionsIsSet(true);
+      }
+    }
+  }
+
+}
+
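
A sketch of the bean-style API generated for the optional list field above; everything used here appears in this hunk, and the empty Function instance is a placeholder (its no-arg constructor is the same one the deserializer uses):

import org.apache.hadoop.hive.metastore.api.Function;
import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse;

public class GetAllFunctionsResponseDemo {
  public static void main(String[] args) {
    GetAllFunctionsResponse resp = new GetAllFunctionsResponse();
    System.out.println(resp.isSetFunctions());   // false: optional field starts unset
    System.out.println(resp.getFunctionsSize()); // 0: size helper is null-safe

    // addToFunctions lazily creates the backing ArrayList on first use.
    resp.addToFunctions(new Function());
    System.out.println(resp.isSetFunctions());   // true
    System.out.println(resp.getFunctionsSize()); // 1

    // Because 'functions' is optional, toString() and the write path
    // only include it when it has been set.
    System.out.println(resp);
  }
}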

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogRequest.java
new file mode 100644
index 0000000..c0e6240
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogRequest.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetCatalogRequest implements org.apache.thrift.TBase<GetCatalogRequest, GetCatalogRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetCatalogRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogRequest");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetCatalogRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetCatalogRequestTupleSchemeFactory());
+  }
+
+  private String name; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NAME((short)1, "name");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NAME
+          return NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogRequest.class, metaDataMap);
+  }
+
+  public GetCatalogRequest() {
+  }
+
+  public GetCatalogRequest(
+    String name)
+  {
+    this();
+    this.name = name;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetCatalogRequest(GetCatalogRequest other) {
+    if (other.isSetName()) {
+      this.name = other.name;
+    }
+  }
+
+  public GetCatalogRequest deepCopy() {
+    return new GetCatalogRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.name = null;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NAME:
+      return getName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NAME:
+      return isSetName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetCatalogRequest)
+      return this.equals((GetCatalogRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetCatalogRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetCatalogRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetCatalogRequest(");
+    boolean first = true;
+
+    sb.append("name:");
+    if (this.name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.name);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetCatalogRequestStandardSchemeFactory implements SchemeFactory {
+    public GetCatalogRequestStandardScheme getScheme() {
+      return new GetCatalogRequestStandardScheme();
+    }
+  }
+
+  private static class GetCatalogRequestStandardScheme extends StandardScheme<GetCatalogRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = iprot.readString();
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        oprot.writeString(struct.name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetCatalogRequestTupleSchemeFactory implements SchemeFactory {
+    public GetCatalogRequestTupleScheme getScheme() {
+      return new GetCatalogRequestTupleScheme();
+    }
+  }
+
+  private static class GetCatalogRequestTupleScheme extends TupleScheme<GetCatalogRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetName()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.name = iprot.readString();
+        struct.setNameIsSet(true);
+      }
+    }
+  }
+
+}
+
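
A minimal round-trip sketch for the request struct above. The catalog name is a placeholder value; TMemoryBuffer and TCompactProtocol are standard libthrift classes, and the compact protocol routes through the StandardScheme registered in the schemes map (only TTupleProtocol selects the TupleScheme):

import org.apache.hadoop.hive.metastore.api.GetCatalogRequest;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class GetCatalogRequestDemo {
  public static void main(String[] args) throws Exception {
    // "spark_catalog" is an arbitrary example name.
    GetCatalogRequest req = new GetCatalogRequest("spark_catalog");

    // Serialize into an in-memory transport, then read it back.
    TMemoryBuffer buf = new TMemoryBuffer(128);
    TCompactProtocol proto = new TCompactProtocol(buf);
    req.write(proto);

    GetCatalogRequest copy = new GetCatalogRequest();
    copy.read(proto);
    System.out.println(copy.getName());      // spark_catalog
    System.out.println(copy.equals(req));    // true
  }
}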

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogResponse.java
new file mode 100644
index 0000000..096f5ef
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogResponse.java
@@ -0,0 +1,400 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetCatalogResponse implements org.apache.thrift.TBase<GetCatalogResponse, GetCatalogResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetCatalogResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogResponse");
+
+  private static final org.apache.thrift.protocol.TField CATALOG_FIELD_DESC = new org.apache.thrift.protocol.TField("catalog", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetCatalogResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetCatalogResponseTupleSchemeFactory());
+  }
+
+  private Catalog catalog; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CATALOG((short)1, "catalog");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CATALOG
+          return CATALOG;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CATALOG, new org.apache.thrift.meta_data.FieldMetaData("catalog", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Catalog.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogResponse.class, metaDataMap);
+  }
+
+  public GetCatalogResponse() {
+  }
+
+  public GetCatalogResponse(
+    Catalog catalog)
+  {
+    this();
+    this.catalog = catalog;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetCatalogResponse(GetCatalogResponse other) {
+    if (other.isSetCatalog()) {
+      this.catalog = new Catalog(other.catalog);
+    }
+  }
+
+  public GetCatalogResponse deepCopy() {
+    return new GetCatalogResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catalog = null;
+  }
+
+  public Catalog getCatalog() {
+    return this.catalog;
+  }
+
+  public void setCatalog(Catalog catalog) {
+    this.catalog = catalog;
+  }
+
+  public void unsetCatalog() {
+    this.catalog = null;
+  }
+
+  /** Returns true if field catalog is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatalog() {
+    return this.catalog != null;
+  }
+
+  public void setCatalogIsSet(boolean value) {
+    if (!value) {
+      this.catalog = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CATALOG:
+      if (value == null) {
+        unsetCatalog();
+      } else {
+        setCatalog((Catalog)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CATALOG:
+      return getCatalog();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CATALOG:
+      return isSetCatalog();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetCatalogResponse)
+      return this.equals((GetCatalogResponse)that);
+    return false;
+  }
+
+  public boolean equals(GetCatalogResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catalog = true && this.isSetCatalog();
+    boolean that_present_catalog = true && that.isSetCatalog();
+    if (this_present_catalog || that_present_catalog) {
+      if (!(this_present_catalog && that_present_catalog))
+        return false;
+      if (!this.catalog.equals(that.catalog))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catalog = true && (isSetCatalog());
+    list.add(present_catalog);
+    if (present_catalog)
+      list.add(catalog);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetCatalogResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatalog()).compareTo(other.isSetCatalog());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatalog()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalog, other.catalog);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetCatalogResponse(");
+    boolean first = true;
+
+    sb.append("catalog:");
+    if (this.catalog == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catalog);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (catalog != null) {
+      catalog.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetCatalogResponseStandardSchemeFactory implements SchemeFactory {
+    public GetCatalogResponseStandardScheme getScheme() {
+      return new GetCatalogResponseStandardScheme();
+    }
+  }
+
+  private static class GetCatalogResponseStandardScheme extends StandardScheme<GetCatalogResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CATALOG
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.catalog = new Catalog();
+              struct.catalog.read(iprot);
+              struct.setCatalogIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catalog != null) {
+        oprot.writeFieldBegin(CATALOG_FIELD_DESC);
+        struct.catalog.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetCatalogResponseTupleSchemeFactory implements SchemeFactory {
+    public GetCatalogResponseTupleScheme getScheme() {
+      return new GetCatalogResponseTupleScheme();
+    }
+  }
+
+  private static class GetCatalogResponseTupleScheme extends TupleScheme<GetCatalogResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatalog()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetCatalog()) {
+        struct.catalog.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.catalog = new Catalog();
+        struct.catalog.read(iprot);
+        struct.setCatalogIsSet(true);
+      }
+    }
+  }
+
+}
+
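
A short sketch of the deep-copy semantics generated for the struct-valued field above. The empty Catalog is purely illustrative (its no-arg constructor is the one the deserializer uses); the copy constructor clones the contained Catalog rather than aliasing it:

import org.apache.hadoop.hive.metastore.api.Catalog;
import org.apache.hadoop.hive.metastore.api.GetCatalogResponse;

public class GetCatalogResponseDemo {
  public static void main(String[] args) {
    GetCatalogResponse resp = new GetCatalogResponse(new Catalog());

    // deepCopy() delegates to the copy constructor, which builds a new
    // Catalog instance: the copies are equal but share no mutable state.
    GetCatalogResponse copy = resp.deepCopy();
    System.out.println(copy.getCatalog() == resp.getCatalog()); // false
    System.out.println(copy.equals(resp));                      // true
  }
}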

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogsResponse.java
new file mode 100644
index 0000000..aafd528
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetCatalogsResponse.java
@@ -0,0 +1,444 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetCatalogsResponse implements org.apache.thrift.TBase<GetCatalogsResponse, GetCatalogsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetCatalogsResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCatalogsResponse");
+
+  private static final org.apache.thrift.protocol.TField NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("names", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetCatalogsResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetCatalogsResponseTupleSchemeFactory());
+  }
+
+  private List<String> names; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NAMES((short)1, "names");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NAMES
+          return NAMES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NAMES, new org.apache.thrift.meta_data.FieldMetaData("names", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCatalogsResponse.class, metaDataMap);
+  }
+
+  public GetCatalogsResponse() {
+  }
+
+  public GetCatalogsResponse(
+    List<String> names)
+  {
+    this();
+    this.names = names;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetCatalogsResponse(GetCatalogsResponse other) {
+    if (other.isSetNames()) {
+      List<String> __this__names = new ArrayList<String>(other.names);
+      this.names = __this__names;
+    }
+  }
+
+  public GetCatalogsResponse deepCopy() {
+    return new GetCatalogsResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.names = null;
+  }
+
+  public int getNamesSize() {
+    return (this.names == null) ? 0 : this.names.size();
+  }
+
+  public java.util.Iterator<String> getNamesIterator() {
+    return (this.names == null) ? null : this.names.iterator();
+  }
+
+  public void addToNames(String elem) {
+    if (this.names == null) {
+      this.names = new ArrayList<String>();
+    }
+    this.names.add(elem);
+  }
+
+  public List<String> getNames() {
+    return this.names;
+  }
+
+  public void setNames(List<String> names) {
+    this.names = names;
+  }
+
+  public void unsetNames() {
+    this.names = null;
+  }
+
+  /** Returns true if field names is set (has been assigned a value) and false otherwise */
+  public boolean isSetNames() {
+    return this.names != null;
+  }
+
+  public void setNamesIsSet(boolean value) {
+    if (!value) {
+      this.names = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NAMES:
+      if (value == null) {
+        unsetNames();
+      } else {
+        setNames((List<String>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NAMES:
+      return getNames();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NAMES:
+      return isSetNames();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetCatalogsResponse)
+      return this.equals((GetCatalogsResponse)that);
+    return false;
+  }
+
+  public boolean equals(GetCatalogsResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_names = true && this.isSetNames();
+    boolean that_present_names = true && that.isSetNames();
+    if (this_present_names || that_present_names) {
+      if (!(this_present_names && that_present_names))
+        return false;
+      if (!this.names.equals(that.names))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_names = true && (isSetNames());
+    list.add(present_names);
+    if (present_names)
+      list.add(names);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetCatalogsResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetNames()).compareTo(other.isSetNames());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNames()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.names, other.names);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetCatalogsResponse(");
+    boolean first = true;
+
+    sb.append("names:");
+    if (this.names == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.names);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetCatalogsResponseStandardSchemeFactory implements SchemeFactory {
+    public GetCatalogsResponseStandardScheme getScheme() {
+      return new GetCatalogsResponseStandardScheme();
+    }
+  }
+
+  private static class GetCatalogsResponseStandardScheme extends StandardScheme<GetCatalogsResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetCatalogsResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NAMES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list94 = iprot.readListBegin();
+                struct.names = new ArrayList<String>(_list94.size);
+                String _elem95;
+                for (int _i96 = 0; _i96 < _list94.size; ++_i96)
+                {
+                  _elem95 = iprot.readString();
+                  struct.names.add(_elem95);
+                }
+                iprot.readListEnd();
+              }
+              struct.setNamesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetCatalogsResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.names != null) {
+        oprot.writeFieldBegin(NAMES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size()));
+          for (String _iter97 : struct.names)
+          {
+            oprot.writeString(_iter97);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetCatalogsResponseTupleSchemeFactory implements SchemeFactory {
+    public GetCatalogsResponseTupleScheme getScheme() {
+      return new GetCatalogsResponseTupleScheme();
+    }
+  }
+
+  private static class GetCatalogsResponseTupleScheme extends TupleScheme<GetCatalogsResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetCatalogsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetNames()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetNames()) {
+        {
+          oprot.writeI32(struct.names.size());
+          for (String _iter98 : struct.names)
+          {
+            oprot.writeString(_iter98);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetCatalogsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list99 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.names = new ArrayList<String>(_list99.size);
+          String _elem100;
+          for (int _i101 = 0; _i101 < _list99.size; ++_i101)
+          {
+            _elem100 = iprot.readString();
+            struct.names.add(_elem100);
+          }
+        }
+        struct.setNamesIsSet(true);
+      }
+    }
+  }
+
+}
+
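
A sketch exercising the TupleScheme path of the struct above. The catalog names are placeholders; TTupleProtocol is the standard libthrift protocol whose getScheme() selects the TupleScheme, so this write emits the one-bit BitSet marking whether 'names' is set, followed by the list length and elements:

import org.apache.hadoop.hive.metastore.api.GetCatalogsResponse;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class GetCatalogsResponseDemo {
  public static void main(String[] args) throws Exception {
    GetCatalogsResponse resp = new GetCatalogsResponse();
    resp.addToNames("hive");   // placeholder catalog names
    resp.addToNames("spark");

    // Round-trip through an in-memory transport using the tuple encoding.
    TMemoryBuffer buf = new TMemoryBuffer(128);
    TTupleProtocol proto = new TTupleProtocol(buf);
    resp.write(proto);

    GetCatalogsResponse copy = new GetCatalogsResponse();
    copy.read(proto);
    System.out.println(copy.getNames()); // [hive, spark]
  }
}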

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java
new file mode 100644
index 0000000..836f35f
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java
@@ -0,0 +1,773 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetFileMetadataByExprRequest implements org.apache.thrift.TBase<GetFileMetadataByExprRequest, GetFileMetadataByExprRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetFileMetadataByExprRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFileMetadataByExprRequest");
+
+  private static final org.apache.thrift.protocol.TField FILE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("fileIds", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField EXPR_FIELD_DESC = new org.apache.thrift.protocol.TField("expr", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField DO_GET_FOOTERS_FIELD_DESC = new org.apache.thrift.protocol.TField("doGetFooters", org.apache.thrift.protocol.TType.BOOL, (short)3);
+  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetFileMetadataByExprRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetFileMetadataByExprRequestTupleSchemeFactory());
+  }
+
+  private List<Long> fileIds; // required
+  private ByteBuffer expr; // required
+  private boolean doGetFooters; // optional
+  private FileMetadataExprType type; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FILE_IDS((short)1, "fileIds"),
+    EXPR((short)2, "expr"),
+    DO_GET_FOOTERS((short)3, "doGetFooters"),
+    /**
+     * 
+     * @see FileMetadataExprType
+     */
+    TYPE((short)4, "type");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FILE_IDS
+          return FILE_IDS;
+        case 2: // EXPR
+          return EXPR;
+        case 3: // DO_GET_FOOTERS
+          return DO_GET_FOOTERS;
+        case 4: // TYPE
+          return TYPE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __DOGETFOOTERS_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.DO_GET_FOOTERS,_Fields.TYPE};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FILE_IDS, new org.apache.thrift.meta_data.FieldMetaData("fileIds", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    tmpMap.put(_Fields.EXPR, new org.apache.thrift.meta_data.FieldMetaData("expr", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    tmpMap.put(_Fields.DO_GET_FOOTERS, new org.apache.thrift.meta_data.FieldMetaData("doGetFooters", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, FileMetadataExprType.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFileMetadataByExprRequest.class, metaDataMap);
+  }
+
+  public GetFileMetadataByExprRequest() {
+  }
+
+  public GetFileMetadataByExprRequest(
+    List<Long> fileIds,
+    ByteBuffer expr)
+  {
+    this();
+    this.fileIds = fileIds;
+    this.expr = org.apache.thrift.TBaseHelper.copyBinary(expr);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetFileMetadataByExprRequest(GetFileMetadataByExprRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetFileIds()) {
+      List<Long> __this__fileIds = new ArrayList<Long>(other.fileIds);
+      this.fileIds = __this__fileIds;
+    }
+    if (other.isSetExpr()) {
+      this.expr = org.apache.thrift.TBaseHelper.copyBinary(other.expr);
+    }
+    this.doGetFooters = other.doGetFooters;
+    if (other.isSetType()) {
+      this.type = other.type;
+    }
+  }
+
+  public GetFileMetadataByExprRequest deepCopy() {
+    return new GetFileMetadataByExprRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.fileIds = null;
+    this.expr = null;
+    setDoGetFootersIsSet(false);
+    this.doGetFooters = false;
+    this.type = null;
+  }
+
+  public int getFileIdsSize() {
+    return (this.fileIds == null) ? 0 : this.fileIds.size();
+  }
+
+  public java.util.Iterator<Long> getFileIdsIterator() {
+    return (this.fileIds == null) ? null : this.fileIds.iterator();
+  }
+
+  public void addToFileIds(long elem) {
+    if (this.fileIds == null) {
+      this.fileIds = new ArrayList<Long>();
+    }
+    this.fileIds.add(elem);
+  }
+
+  public List<Long> getFileIds() {
+    return this.fileIds;
+  }
+
+  public void setFileIds(List<Long> fileIds) {
+    this.fileIds = fileIds;
+  }
+
+  public void unsetFileIds() {
+    this.fileIds = null;
+  }
+
+  /** Returns true if field fileIds is set (has been assigned a value) and false otherwise */
+  public boolean isSetFileIds() {
+    return this.fileIds != null;
+  }
+
+  public void setFileIdsIsSet(boolean value) {
+    if (!value) {
+      this.fileIds = null;
+    }
+  }
+
+  public byte[] getExpr() {
+    setExpr(org.apache.thrift.TBaseHelper.rightSize(expr));
+    return expr == null ? null : expr.array();
+  }
+
+  public ByteBuffer bufferForExpr() {
+    return org.apache.thrift.TBaseHelper.copyBinary(expr);
+  }
+
+  public void setExpr(byte[] expr) {
+    this.expr = expr == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(expr, expr.length));
+  }
+
+  public void setExpr(ByteBuffer expr) {
+    this.expr = org.apache.thrift.TBaseHelper.copyBinary(expr);
+  }
+
+  public void unsetExpr() {
+    this.expr = null;
+  }
+
+  /** Returns true if field expr is set (has been assigned a value) and false otherwise */
+  public boolean isSetExpr() {
+    return this.expr != null;
+  }
+
+  public void setExprIsSet(boolean value) {
+    if (!value) {
+      this.expr = null;
+    }
+  }
+
+  public boolean isDoGetFooters() {
+    return this.doGetFooters;
+  }
+
+  public void setDoGetFooters(boolean doGetFooters) {
+    this.doGetFooters = doGetFooters;
+    setDoGetFootersIsSet(true);
+  }
+
+  public void unsetDoGetFooters() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DOGETFOOTERS_ISSET_ID);
+  }
+
+  /** Returns true if field doGetFooters is set (has been assigned a value) and false otherwise */
+  public boolean isSetDoGetFooters() {
+    return EncodingUtils.testBit(__isset_bitfield, __DOGETFOOTERS_ISSET_ID);
+  }
+
+  public void setDoGetFootersIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DOGETFOOTERS_ISSET_ID, value);
+  }
+
+  /**
+   * 
+   * @see FileMetadataExprType
+   */
+  public FileMetadataExprType getType() {
+    return this.type;
+  }
+
+  /**
+   * 
+   * @see FileMetadataExprType
+   */
+  public void setType(FileMetadataExprType type) {
+    this.type = type;
+  }
+
+  public void unsetType() {
+    this.type = null;
+  }
+
+  /** Returns true if field type is set (has been assigned a value) and false otherwise */
+  public boolean isSetType() {
+    return this.type != null;
+  }
+
+  public void setTypeIsSet(boolean value) {
+    if (!value) {
+      this.type = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FILE_IDS:
+      if (value == null) {
+        unsetFileIds();
+      } else {
+        setFileIds((List<Long>)value);
+      }
+      break;
+
+    case EXPR:
+      if (value == null) {
+        unsetExpr();
+      } else {
+        setExpr((ByteBuffer)value);
+      }
+      break;
+
+    case DO_GET_FOOTERS:
+      if (value == null) {
+        unsetDoGetFooters();
+      } else {
+        setDoGetFooters((Boolean)value);
+      }
+      break;
+
+    case TYPE:
+      if (value == null) {
+        unsetType();
+      } else {
+        setType((FileMetadataExprType)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FILE_IDS:
+      return getFileIds();
+
+    case EXPR:
+      return getExpr();
+
+    case DO_GET_FOOTERS:
+      return isDoGetFooters();
+
+    case TYPE:
+      return getType();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FILE_IDS:
+      return isSetFileIds();
+    case EXPR:
+      return isSetExpr();
+    case DO_GET_FOOTERS:
+      return isSetDoGetFooters();
+    case TYPE:
+      return isSetType();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetFileMetadataByExprRequest)
+      return this.equals((GetFileMetadataByExprRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetFileMetadataByExprRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_fileIds = true && this.isSetFileIds();
+    boolean that_present_fileIds = true && that.isSetFileIds();
+    if (this_present_fileIds || that_present_fileIds) {
+      if (!(this_present_fileIds && that_present_fileIds))
+        return false;
+      if (!this.fileIds.equals(that.fileIds))
+        return false;
+    }
+
+    boolean this_present_expr = true && this.isSetExpr();
+    boolean that_present_expr = true && that.isSetExpr();
+    if (this_present_expr || that_present_expr) {
+      if (!(this_present_expr && that_present_expr))
+        return false;
+      if (!this.expr.equals(that.expr))
+        return false;
+    }
+
+    boolean this_present_doGetFooters = true && this.isSetDoGetFooters();
+    boolean that_present_doGetFooters = true && that.isSetDoGetFooters();
+    if (this_present_doGetFooters || that_present_doGetFooters) {
+      if (!(this_present_doGetFooters && that_present_doGetFooters))
+        return false;
+      if (this.doGetFooters != that.doGetFooters)
+        return false;
+    }
+
+    boolean this_present_type = true && this.isSetType();
+    boolean that_present_type = true && that.isSetType();
+    if (this_present_type || that_present_type) {
+      if (!(this_present_type && that_present_type))
+        return false;
+      if (!this.type.equals(that.type))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_fileIds = true && (isSetFileIds());
+    list.add(present_fileIds);
+    if (present_fileIds)
+      list.add(fileIds);
+
+    boolean present_expr = true && (isSetExpr());
+    list.add(present_expr);
+    if (present_expr)
+      list.add(expr);
+
+    boolean present_doGetFooters = true && (isSetDoGetFooters());
+    list.add(present_doGetFooters);
+    if (present_doGetFooters)
+      list.add(doGetFooters);
+
+    boolean present_type = true && (isSetType());
+    list.add(present_type);
+    if (present_type)
+      list.add(type.getValue());
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetFileMetadataByExprRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetFileIds()).compareTo(other.isSetFileIds());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFileIds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fileIds, other.fileIds);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetExpr()).compareTo(other.isSetExpr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetExpr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.expr, other.expr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDoGetFooters()).compareTo(other.isSetDoGetFooters());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDoGetFooters()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.doGetFooters, other.doGetFooters);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetFileMetadataByExprRequest(");
+    boolean first = true;
+
+    sb.append("fileIds:");
+    if (this.fileIds == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fileIds);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("expr:");
+    if (this.expr == null) {
+      sb.append("null");
+    } else {
+      org.apache.thrift.TBaseHelper.toString(this.expr, sb);
+    }
+    first = false;
+    if (isSetDoGetFooters()) {
+      if (!first) sb.append(", ");
+      sb.append("doGetFooters:");
+      sb.append(this.doGetFooters);
+      first = false;
+    }
+    if (isSetType()) {
+      if (!first) sb.append(", ");
+      sb.append("type:");
+      if (this.type == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.type);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetFileIds()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'fileIds' is unset! Struct:" + toString());
+    }
+
+    if (!isSetExpr()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'expr' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetFileMetadataByExprRequestStandardSchemeFactory implements SchemeFactory {
+    public GetFileMetadataByExprRequestStandardScheme getScheme() {
+      return new GetFileMetadataByExprRequestStandardScheme();
+    }
+  }
+
+  private static class GetFileMetadataByExprRequestStandardScheme extends StandardScheme<GetFileMetadataByExprRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FILE_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list782 = iprot.readListBegin();
+                struct.fileIds = new ArrayList<Long>(_list782.size);
+                long _elem783;
+                for (int _i784 = 0; _i784 < _list782.size; ++_i784)
+                {
+                  _elem783 = iprot.readI64();
+                  struct.fileIds.add(_elem783);
+                }
+                iprot.readListEnd();
+              }
+              struct.setFileIdsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // EXPR
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.expr = iprot.readBinary();
+              struct.setExprIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // DO_GET_FOOTERS
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.doGetFooters = iprot.readBool();
+              struct.setDoGetFootersIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.type = org.apache.hadoop.hive.metastore.api.FileMetadataExprType.findByValue(iprot.readI32());
+              struct.setTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.fileIds != null) {
+        oprot.writeFieldBegin(FILE_IDS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size()));
+          for (long _iter785 : struct.fileIds)
+          {
+            oprot.writeI64(_iter785);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.expr != null) {
+        oprot.writeFieldBegin(EXPR_FIELD_DESC);
+        oprot.writeBinary(struct.expr);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetDoGetFooters()) {
+        oprot.writeFieldBegin(DO_GET_FOOTERS_FIELD_DESC);
+        oprot.writeBool(struct.doGetFooters);
+        oprot.writeFieldEnd();
+      }
+      if (struct.type != null) {
+        if (struct.isSetType()) {
+          oprot.writeFieldBegin(TYPE_FIELD_DESC);
+          oprot.writeI32(struct.type.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetFileMetadataByExprRequestTupleSchemeFactory implements SchemeFactory {
+    public GetFileMetadataByExprRequestTupleScheme getScheme() {
+      return new GetFileMetadataByExprRequestTupleScheme();
+    }
+  }
+
+  private static class GetFileMetadataByExprRequestTupleScheme extends TupleScheme<GetFileMetadataByExprRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.fileIds.size());
+        for (long _iter786 : struct.fileIds)
+        {
+          oprot.writeI64(_iter786);
+        }
+      }
+      oprot.writeBinary(struct.expr);
+      BitSet optionals = new BitSet();
+      if (struct.isSetDoGetFooters()) {
+        optionals.set(0);
+      }
+      if (struct.isSetType()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetDoGetFooters()) {
+        oprot.writeBool(struct.doGetFooters);
+      }
+      if (struct.isSetType()) {
+        oprot.writeI32(struct.type.getValue());
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list787 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.fileIds = new ArrayList<Long>(_list787.size);
+        long _elem788;
+        for (int _i789 = 0; _i789 < _list787.size; ++_i789)
+        {
+          _elem788 = iprot.readI64();
+          struct.fileIds.add(_elem788);
+        }
+      }
+      struct.setFileIdsIsSet(true);
+      struct.expr = iprot.readBinary();
+      struct.setExprIsSet(true);
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.doGetFooters = iprot.readBool();
+        struct.setDoGetFootersIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.type = org.apache.hadoop.hive.metastore.api.FileMetadataExprType.findByValue(iprot.readI32());
+        struct.setTypeIsSet(true);
+      }
+    }
+  }
+
+}
+
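
For context, a minimal usage sketch of the generated struct (not part of this
diff). The required fileIds/expr fields come from the constructor; the optional
fields are only written to the wire when explicitly set, as the isSet guards in
write() above show. The enum constant FileMetadataExprType.ORC_SARG is assumed
here purely for illustration:

  import java.nio.ByteBuffer;
  import java.util.Arrays;
  import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
  import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest;

  public class GetFileMetadataByExprRequestSketch {
    public static void main(String[] args) throws Exception {
      // Required fields (marked REQUIRED in the Thrift metadata above).
      GetFileMetadataByExprRequest req = new GetFileMetadataByExprRequest(
          Arrays.asList(1L, 2L, 3L),                 // fileIds
          ByteBuffer.wrap(new byte[] {0x01, 0x02})); // placeholder filter bytes

      // Optional fields: serialized only when isSetDoGetFooters()/isSetType() hold.
      req.setDoGetFooters(true);
      req.setType(FileMetadataExprType.ORC_SARG);    // assumed enum constant

      req.validate(); // throws TProtocolException if fileIds or expr were unset
      System.out.println(req);
    }
  }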


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java
new file mode 100644
index 0000000..a9c5892
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropConstraintRequest.java
@@ -0,0 +1,701 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DropConstraintRequest implements org.apache.thrift.TBase<DropConstraintRequest, DropConstraintRequest._Fields>, java.io.Serializable, Cloneable, Comparable<DropConstraintRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropConstraintRequest");
+
+  private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField CONSTRAINTNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("constraintname", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DropConstraintRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DropConstraintRequestTupleSchemeFactory());
+  }
+
+  private String dbname; // required
+  private String tablename; // required
+  private String constraintname; // required
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DBNAME((short)1, "dbname"),
+    TABLENAME((short)2, "tablename"),
+    CONSTRAINTNAME((short)3, "constraintname"),
+    CAT_NAME((short)4, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DBNAME
+          return DBNAME;
+        case 2: // TABLENAME
+          return TABLENAME;
+        case 3: // CONSTRAINTNAME
+          return CONSTRAINTNAME;
+        case 4: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLENAME, new org.apache.thrift.meta_data.FieldMetaData("tablename", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CONSTRAINTNAME, new org.apache.thrift.meta_data.FieldMetaData("constraintname", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropConstraintRequest.class, metaDataMap);
+  }
+
+  public DropConstraintRequest() {
+  }
+
+  public DropConstraintRequest(
+    String dbname,
+    String tablename,
+    String constraintname)
+  {
+    this();
+    this.dbname = dbname;
+    this.tablename = tablename;
+    this.constraintname = constraintname;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DropConstraintRequest(DropConstraintRequest other) {
+    if (other.isSetDbname()) {
+      this.dbname = other.dbname;
+    }
+    if (other.isSetTablename()) {
+      this.tablename = other.tablename;
+    }
+    if (other.isSetConstraintname()) {
+      this.constraintname = other.constraintname;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public DropConstraintRequest deepCopy() {
+    return new DropConstraintRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbname = null;
+    this.tablename = null;
+    this.constraintname = null;
+    this.catName = null;
+  }
+
+  public String getDbname() {
+    return this.dbname;
+  }
+
+  public void setDbname(String dbname) {
+    this.dbname = dbname;
+  }
+
+  public void unsetDbname() {
+    this.dbname = null;
+  }
+
+  /** Returns true if field dbname is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbname() {
+    return this.dbname != null;
+  }
+
+  public void setDbnameIsSet(boolean value) {
+    if (!value) {
+      this.dbname = null;
+    }
+  }
+
+  public String getTablename() {
+    return this.tablename;
+  }
+
+  public void setTablename(String tablename) {
+    this.tablename = tablename;
+  }
+
+  public void unsetTablename() {
+    this.tablename = null;
+  }
+
+  /** Returns true if field tablename is set (has been assigned a value) and false otherwise */
+  public boolean isSetTablename() {
+    return this.tablename != null;
+  }
+
+  public void setTablenameIsSet(boolean value) {
+    if (!value) {
+      this.tablename = null;
+    }
+  }
+
+  public String getConstraintname() {
+    return this.constraintname;
+  }
+
+  public void setConstraintname(String constraintname) {
+    this.constraintname = constraintname;
+  }
+
+  public void unsetConstraintname() {
+    this.constraintname = null;
+  }
+
+  /** Returns true if field constraintname is set (has been assigned a value) and false otherwise */
+  public boolean isSetConstraintname() {
+    return this.constraintname != null;
+  }
+
+  public void setConstraintnameIsSet(boolean value) {
+    if (!value) {
+      this.constraintname = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DBNAME:
+      if (value == null) {
+        unsetDbname();
+      } else {
+        setDbname((String)value);
+      }
+      break;
+
+    case TABLENAME:
+      if (value == null) {
+        unsetTablename();
+      } else {
+        setTablename((String)value);
+      }
+      break;
+
+    case CONSTRAINTNAME:
+      if (value == null) {
+        unsetConstraintname();
+      } else {
+        setConstraintname((String)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DBNAME:
+      return getDbname();
+
+    case TABLENAME:
+      return getTablename();
+
+    case CONSTRAINTNAME:
+      return getConstraintname();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DBNAME:
+      return isSetDbname();
+    case TABLENAME:
+      return isSetTablename();
+    case CONSTRAINTNAME:
+      return isSetConstraintname();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DropConstraintRequest)
+      return this.equals((DropConstraintRequest)that);
+    return false;
+  }
+
+  public boolean equals(DropConstraintRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbname = true && this.isSetDbname();
+    boolean that_present_dbname = true && that.isSetDbname();
+    if (this_present_dbname || that_present_dbname) {
+      if (!(this_present_dbname && that_present_dbname))
+        return false;
+      if (!this.dbname.equals(that.dbname))
+        return false;
+    }
+
+    boolean this_present_tablename = true && this.isSetTablename();
+    boolean that_present_tablename = true && that.isSetTablename();
+    if (this_present_tablename || that_present_tablename) {
+      if (!(this_present_tablename && that_present_tablename))
+        return false;
+      if (!this.tablename.equals(that.tablename))
+        return false;
+    }
+
+    boolean this_present_constraintname = true && this.isSetConstraintname();
+    boolean that_present_constraintname = true && that.isSetConstraintname();
+    if (this_present_constraintname || that_present_constraintname) {
+      if (!(this_present_constraintname && that_present_constraintname))
+        return false;
+      if (!this.constraintname.equals(that.constraintname))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbname = true && (isSetDbname());
+    list.add(present_dbname);
+    if (present_dbname)
+      list.add(dbname);
+
+    boolean present_tablename = true && (isSetTablename());
+    list.add(present_tablename);
+    if (present_tablename)
+      list.add(tablename);
+
+    boolean present_constraintname = true && (isSetConstraintname());
+    list.add(present_constraintname);
+    if (present_constraintname)
+      list.add(constraintname);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DropConstraintRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, other.dbname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTablename()).compareTo(other.isSetTablename());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTablename()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablename, other.tablename);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetConstraintname()).compareTo(other.isSetConstraintname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetConstraintname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.constraintname, other.constraintname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DropConstraintRequest(");
+    boolean first = true;
+
+    sb.append("dbname:");
+    if (this.dbname == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbname);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tablename:");
+    if (this.tablename == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tablename);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("constraintname:");
+    if (this.constraintname == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.constraintname);
+    }
+    first = false;
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbname()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbname' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTablename()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tablename' is unset! Struct:" + toString());
+    }
+
+    if (!isSetConstraintname()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'constraintname' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DropConstraintRequestStandardSchemeFactory implements SchemeFactory {
+    public DropConstraintRequestStandardScheme getScheme() {
+      return new DropConstraintRequestStandardScheme();
+    }
+  }
+
+  private static class DropConstraintRequestStandardScheme extends StandardScheme<DropConstraintRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DropConstraintRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DBNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbname = iprot.readString();
+              struct.setDbnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TABLENAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tablename = iprot.readString();
+              struct.setTablenameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // CONSTRAINTNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.constraintname = iprot.readString();
+              struct.setConstraintnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DropConstraintRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbname != null) {
+        oprot.writeFieldBegin(DBNAME_FIELD_DESC);
+        oprot.writeString(struct.dbname);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tablename != null) {
+        oprot.writeFieldBegin(TABLENAME_FIELD_DESC);
+        oprot.writeString(struct.tablename);
+        oprot.writeFieldEnd();
+      }
+      if (struct.constraintname != null) {
+        oprot.writeFieldBegin(CONSTRAINTNAME_FIELD_DESC);
+        oprot.writeString(struct.constraintname);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DropConstraintRequestTupleSchemeFactory implements SchemeFactory {
+    public DropConstraintRequestTupleScheme getScheme() {
+      return new DropConstraintRequestTupleScheme();
+    }
+  }
+
+  private static class DropConstraintRequestTupleScheme extends TupleScheme<DropConstraintRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DropConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbname);
+      oprot.writeString(struct.tablename);
+      oprot.writeString(struct.constraintname);
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatName()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DropConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbname = iprot.readString();
+      struct.setDbnameIsSet(true);
+      struct.tablename = iprot.readString();
+      struct.setTablenameIsSet(true);
+      struct.constraintname = iprot.readString();
+      struct.setConstraintnameIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
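
As above, a short hedged sketch (not part of this diff) of building the struct
and round-tripping it through the compact protocol, mirroring the
writeObject()/readObject() helpers above; TMemoryBuffer is the standard
libthrift in-memory transport, and the database/table/constraint names are
illustrative:

  import org.apache.hadoop.hive.metastore.api.DropConstraintRequest;
  import org.apache.thrift.protocol.TCompactProtocol;
  import org.apache.thrift.transport.TMemoryBuffer;

  public class DropConstraintRequestSketch {
    public static void main(String[] args) throws Exception {
      // All three constructor arguments are REQUIRED fields.
      DropConstraintRequest req =
          new DropConstraintRequest("default", "orders", "pk_orders");
      req.setCatName("hive"); // optional; written only when isSetCatName() is true

      // Serialize and deserialize through an in-memory transport.
      TMemoryBuffer buf = new TMemoryBuffer(256);
      req.write(new TCompactProtocol(buf));

      DropConstraintRequest copy = new DropConstraintRequest();
      copy.read(new TCompactProtocol(buf));

      System.out.println(req.equals(copy)); // prints true
    }
  }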

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
new file mode 100644
index 0000000..8a07abf
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsExpr.java
@@ -0,0 +1,505 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DropPartitionsExpr implements org.apache.thrift.TBase<DropPartitionsExpr, DropPartitionsExpr._Fields>, java.io.Serializable, Cloneable, Comparable<DropPartitionsExpr> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsExpr");
+
+  private static final org.apache.thrift.protocol.TField EXPR_FIELD_DESC = new org.apache.thrift.protocol.TField("expr", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField PART_ARCHIVE_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("partArchiveLevel", org.apache.thrift.protocol.TType.I32, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DropPartitionsExprStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DropPartitionsExprTupleSchemeFactory());
+  }
+
+  private ByteBuffer expr; // required
+  private int partArchiveLevel; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    EXPR((short)1, "expr"),
+    PART_ARCHIVE_LEVEL((short)2, "partArchiveLevel");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // EXPR
+          return EXPR;
+        case 2: // PART_ARCHIVE_LEVEL
+          return PART_ARCHIVE_LEVEL;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __PARTARCHIVELEVEL_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.PART_ARCHIVE_LEVEL};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.EXPR, new org.apache.thrift.meta_data.FieldMetaData("expr", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    tmpMap.put(_Fields.PART_ARCHIVE_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("partArchiveLevel", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropPartitionsExpr.class, metaDataMap);
+  }
+
+  public DropPartitionsExpr() {
+  }
+
+  public DropPartitionsExpr(
+    ByteBuffer expr)
+  {
+    this();
+    this.expr = org.apache.thrift.TBaseHelper.copyBinary(expr);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DropPartitionsExpr(DropPartitionsExpr other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetExpr()) {
+      this.expr = org.apache.thrift.TBaseHelper.copyBinary(other.expr);
+    }
+    this.partArchiveLevel = other.partArchiveLevel;
+  }
+
+  public DropPartitionsExpr deepCopy() {
+    return new DropPartitionsExpr(this);
+  }
+
+  @Override
+  public void clear() {
+    this.expr = null;
+    setPartArchiveLevelIsSet(false);
+    this.partArchiveLevel = 0;
+  }
+
+  public byte[] getExpr() {
+    setExpr(org.apache.thrift.TBaseHelper.rightSize(expr));
+    return expr == null ? null : expr.array();
+  }
+
+  public ByteBuffer bufferForExpr() {
+    return org.apache.thrift.TBaseHelper.copyBinary(expr);
+  }
+
+  public void setExpr(byte[] expr) {
+    this.expr = expr == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(expr, expr.length));
+  }
+
+  public void setExpr(ByteBuffer expr) {
+    this.expr = org.apache.thrift.TBaseHelper.copyBinary(expr);
+  }
+
+  public void unsetExpr() {
+    this.expr = null;
+  }
+
+  /** Returns true if field expr is set (has been assigned a value) and false otherwise */
+  public boolean isSetExpr() {
+    return this.expr != null;
+  }
+
+  public void setExprIsSet(boolean value) {
+    if (!value) {
+      this.expr = null;
+    }
+  }
+
+  public int getPartArchiveLevel() {
+    return this.partArchiveLevel;
+  }
+
+  public void setPartArchiveLevel(int partArchiveLevel) {
+    this.partArchiveLevel = partArchiveLevel;
+    setPartArchiveLevelIsSet(true);
+  }
+
+  public void unsetPartArchiveLevel() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PARTARCHIVELEVEL_ISSET_ID);
+  }
+
+  /** Returns true if field partArchiveLevel is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartArchiveLevel() {
+    return EncodingUtils.testBit(__isset_bitfield, __PARTARCHIVELEVEL_ISSET_ID);
+  }
+
+  public void setPartArchiveLevelIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARTARCHIVELEVEL_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case EXPR:
+      if (value == null) {
+        unsetExpr();
+      } else {
+        setExpr((ByteBuffer)value);
+      }
+      break;
+
+    case PART_ARCHIVE_LEVEL:
+      if (value == null) {
+        unsetPartArchiveLevel();
+      } else {
+        setPartArchiveLevel((Integer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case EXPR:
+      return getExpr();
+
+    case PART_ARCHIVE_LEVEL:
+      return getPartArchiveLevel();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case EXPR:
+      return isSetExpr();
+    case PART_ARCHIVE_LEVEL:
+      return isSetPartArchiveLevel();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DropPartitionsExpr)
+      return this.equals((DropPartitionsExpr)that);
+    return false;
+  }
+
+  public boolean equals(DropPartitionsExpr that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_expr = true && this.isSetExpr();
+    boolean that_present_expr = true && that.isSetExpr();
+    if (this_present_expr || that_present_expr) {
+      if (!(this_present_expr && that_present_expr))
+        return false;
+      if (!this.expr.equals(that.expr))
+        return false;
+    }
+
+    boolean this_present_partArchiveLevel = true && this.isSetPartArchiveLevel();
+    boolean that_present_partArchiveLevel = true && that.isSetPartArchiveLevel();
+    if (this_present_partArchiveLevel || that_present_partArchiveLevel) {
+      if (!(this_present_partArchiveLevel && that_present_partArchiveLevel))
+        return false;
+      if (this.partArchiveLevel != that.partArchiveLevel)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_expr = true && (isSetExpr());
+    list.add(present_expr);
+    if (present_expr)
+      list.add(expr);
+
+    boolean present_partArchiveLevel = true && (isSetPartArchiveLevel());
+    list.add(present_partArchiveLevel);
+    if (present_partArchiveLevel)
+      list.add(partArchiveLevel);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DropPartitionsExpr other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetExpr()).compareTo(other.isSetExpr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetExpr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.expr, other.expr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartArchiveLevel()).compareTo(other.isSetPartArchiveLevel());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartArchiveLevel()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partArchiveLevel, other.partArchiveLevel);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DropPartitionsExpr(");
+    boolean first = true;
+
+    sb.append("expr:");
+    if (this.expr == null) {
+      sb.append("null");
+    } else {
+      org.apache.thrift.TBaseHelper.toString(this.expr, sb);
+    }
+    first = false;
+    if (isSetPartArchiveLevel()) {
+      if (!first) sb.append(", ");
+      sb.append("partArchiveLevel:");
+      sb.append(this.partArchiveLevel);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetExpr()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'expr' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java deserialization bypasses the default constructor, so the isset bitfield must be reset by hand before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DropPartitionsExprStandardSchemeFactory implements SchemeFactory {
+    public DropPartitionsExprStandardScheme getScheme() {
+      return new DropPartitionsExprStandardScheme();
+    }
+  }
+
+  private static class DropPartitionsExprStandardScheme extends StandardScheme<DropPartitionsExpr> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsExpr struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // EXPR
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.expr = iprot.readBinary();
+              struct.setExprIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PART_ARCHIVE_LEVEL
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.partArchiveLevel = iprot.readI32();
+              struct.setPartArchiveLevelIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsExpr struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.expr != null) {
+        oprot.writeFieldBegin(EXPR_FIELD_DESC);
+        oprot.writeBinary(struct.expr);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetPartArchiveLevel()) {
+        oprot.writeFieldBegin(PART_ARCHIVE_LEVEL_FIELD_DESC);
+        oprot.writeI32(struct.partArchiveLevel);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DropPartitionsExprTupleSchemeFactory implements SchemeFactory {
+    public DropPartitionsExprTupleScheme getScheme() {
+      return new DropPartitionsExprTupleScheme();
+    }
+  }
+
+  private static class DropPartitionsExprTupleScheme extends TupleScheme<DropPartitionsExpr> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsExpr struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeBinary(struct.expr);
+      BitSet optionals = new BitSet();
+      if (struct.isSetPartArchiveLevel()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetPartArchiveLevel()) {
+        oprot.writeI32(struct.partArchiveLevel);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsExpr struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.expr = iprot.readBinary();
+      struct.setExprIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.partArchiveLevel = iprot.readI32();
+        struct.setPartArchiveLevelIsSet(true);
+      }
+    }
+  }
+
+}
+
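
For reference, a minimal usage sketch of the generated DropPartitionsExpr (a hypothetical client snippet, not part of this commit; it assumes only the libthrift 0.9.3 runtime the generated code already depends on):

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr;

    public class DropPartitionsExprDemo {
      public static void main(String[] args) throws Exception {
        // 'expr' is the only required field: an opaque, serialized partition filter.
        byte[] filterBytes = new byte[] {0x01, 0x02, 0x03};  // placeholder payload
        DropPartitionsExpr dpe = new DropPartitionsExpr();
        dpe.setExpr(filterBytes);        // the byte[] overload stores a defensive copy
        dpe.setPartArchiveLevel(0);      // optional i32; the setter flips its isset bit
        dpe.validate();                  // throws TProtocolException if 'expr' is unset

        // Round-trip through TCompactProtocol, the same protocol the class uses
        // internally for Java serialization (writeObject/readObject above).
        byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(dpe);
        DropPartitionsExpr copy = new DropPartitionsExpr();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);
        System.out.println(copy.equals(dpe));  // true: expr bytes and archive level match
      }
    }

Note that setExpr(byte[]) stores Arrays.copyOf of the input and bufferForExpr() likewise returns a copy, so the struct does not alias caller-owned buffers.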

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
new file mode 100644
index 0000000..443f08e
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsRequest.java
@@ -0,0 +1,1218 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DropPartitionsRequest implements org.apache.thrift.TBase<DropPartitionsRequest, DropPartitionsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<DropPartitionsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropPartitionsRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("parts", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+  private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteData", org.apache.thrift.protocol.TType.BOOL, (short)4);
+  private static final org.apache.thrift.protocol.TField IF_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifExists", org.apache.thrift.protocol.TType.BOOL, (short)5);
+  private static final org.apache.thrift.protocol.TField IGNORE_PROTECTION_FIELD_DESC = new org.apache.thrift.protocol.TField("ignoreProtection", org.apache.thrift.protocol.TType.BOOL, (short)6);
+  private static final org.apache.thrift.protocol.TField ENVIRONMENT_CONTEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("environmentContext", org.apache.thrift.protocol.TType.STRUCT, (short)7);
+  private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)8);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DropPartitionsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DropPartitionsRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tblName; // required
+  private RequestPartsSpec parts; // required
+  private boolean deleteData; // optional
+  private boolean ifExists; // optional
+  private boolean ignoreProtection; // optional
+  private EnvironmentContext environmentContext; // optional
+  private boolean needResult; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TBL_NAME((short)2, "tblName"),
+    PARTS((short)3, "parts"),
+    DELETE_DATA((short)4, "deleteData"),
+    IF_EXISTS((short)5, "ifExists"),
+    IGNORE_PROTECTION((short)6, "ignoreProtection"),
+    ENVIRONMENT_CONTEXT((short)7, "environmentContext"),
+    NEED_RESULT((short)8, "needResult"),
+    CAT_NAME((short)9, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // PARTS
+          return PARTS;
+        case 4: // DELETE_DATA
+          return DELETE_DATA;
+        case 5: // IF_EXISTS
+          return IF_EXISTS;
+        case 6: // IGNORE_PROTECTION
+          return IGNORE_PROTECTION;
+        case 7: // ENVIRONMENT_CONTEXT
+          return ENVIRONMENT_CONTEXT;
+        case 8: // NEED_RESULT
+          return NEED_RESULT;
+        case 9: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __DELETEDATA_ISSET_ID = 0;
+  private static final int __IFEXISTS_ISSET_ID = 1;
+  private static final int __IGNOREPROTECTION_ISSET_ID = 2;
+  private static final int __NEEDRESULT_ISSET_ID = 3;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.DELETE_DATA,_Fields.IF_EXISTS,_Fields.IGNORE_PROTECTION,_Fields.ENVIRONMENT_CONTEXT,_Fields.NEED_RESULT,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARTS, new org.apache.thrift.meta_data.FieldMetaData("parts", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, RequestPartsSpec.class)));
+    tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.IF_EXISTS, new org.apache.thrift.meta_data.FieldMetaData("ifExists", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.IGNORE_PROTECTION, new org.apache.thrift.meta_data.FieldMetaData("ignoreProtection", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.ENVIRONMENT_CONTEXT, new org.apache.thrift.meta_data.FieldMetaData("environmentContext", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, EnvironmentContext.class)));
+    tmpMap.put(_Fields.NEED_RESULT, new org.apache.thrift.meta_data.FieldMetaData("needResult", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropPartitionsRequest.class, metaDataMap);
+  }
+
+  public DropPartitionsRequest() {
+    this.ifExists = true;
+
+    this.needResult = true;
+
+  }
+
+  public DropPartitionsRequest(
+    String dbName,
+    String tblName,
+    RequestPartsSpec parts)
+  {
+    this();
+    this.dbName = dbName;
+    this.tblName = tblName;
+    this.parts = parts;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DropPartitionsRequest(DropPartitionsRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTblName()) {
+      this.tblName = other.tblName;
+    }
+    if (other.isSetParts()) {
+      this.parts = new RequestPartsSpec(other.parts);
+    }
+    this.deleteData = other.deleteData;
+    this.ifExists = other.ifExists;
+    this.ignoreProtection = other.ignoreProtection;
+    if (other.isSetEnvironmentContext()) {
+      this.environmentContext = new EnvironmentContext(other.environmentContext);
+    }
+    this.needResult = other.needResult;
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public DropPartitionsRequest deepCopy() {
+    return new DropPartitionsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tblName = null;
+    this.parts = null;
+    setDeleteDataIsSet(false);
+    this.deleteData = false;
+    this.ifExists = true;
+
+    setIgnoreProtectionIsSet(false);
+    this.ignoreProtection = false;
+    this.environmentContext = null;
+    this.needResult = true;
+
+    this.catName = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTblName() {
+    return this.tblName;
+  }
+
+  public void setTblName(String tblName) {
+    this.tblName = tblName;
+  }
+
+  public void unsetTblName() {
+    this.tblName = null;
+  }
+
+  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblName() {
+    return this.tblName != null;
+  }
+
+  public void setTblNameIsSet(boolean value) {
+    if (!value) {
+      this.tblName = null;
+    }
+  }
+
+  public RequestPartsSpec getParts() {
+    return this.parts;
+  }
+
+  public void setParts(RequestPartsSpec parts) {
+    this.parts = parts;
+  }
+
+  public void unsetParts() {
+    this.parts = null;
+  }
+
+  /** Returns true if field parts is set (has been assigned a value) and false otherwise */
+  public boolean isSetParts() {
+    return this.parts != null;
+  }
+
+  public void setPartsIsSet(boolean value) {
+    if (!value) {
+      this.parts = null;
+    }
+  }
+
+  public boolean isDeleteData() {
+    return this.deleteData;
+  }
+
+  public void setDeleteData(boolean deleteData) {
+    this.deleteData = deleteData;
+    setDeleteDataIsSet(true);
+  }
+
+  public void unsetDeleteData() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DELETEDATA_ISSET_ID);
+  }
+
+  /** Returns true if field deleteData is set (has been assigned a value) and false otherwise */
+  public boolean isSetDeleteData() {
+    return EncodingUtils.testBit(__isset_bitfield, __DELETEDATA_ISSET_ID);
+  }
+
+  public void setDeleteDataIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETEDATA_ISSET_ID, value);
+  }
+
+  public boolean isIfExists() {
+    return this.ifExists;
+  }
+
+  public void setIfExists(boolean ifExists) {
+    this.ifExists = ifExists;
+    setIfExistsIsSet(true);
+  }
+
+  public void unsetIfExists() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __IFEXISTS_ISSET_ID);
+  }
+
+  /** Returns true if field ifExists is set (has been assigned a value) and false otherwise */
+  public boolean isSetIfExists() {
+    return EncodingUtils.testBit(__isset_bitfield, __IFEXISTS_ISSET_ID);
+  }
+
+  public void setIfExistsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __IFEXISTS_ISSET_ID, value);
+  }
+
+  public boolean isIgnoreProtection() {
+    return this.ignoreProtection;
+  }
+
+  public void setIgnoreProtection(boolean ignoreProtection) {
+    this.ignoreProtection = ignoreProtection;
+    setIgnoreProtectionIsSet(true);
+  }
+
+  public void unsetIgnoreProtection() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __IGNOREPROTECTION_ISSET_ID);
+  }
+
+  /** Returns true if field ignoreProtection is set (has been assigned a value) and false otherwise */
+  public boolean isSetIgnoreProtection() {
+    return EncodingUtils.testBit(__isset_bitfield, __IGNOREPROTECTION_ISSET_ID);
+  }
+
+  public void setIgnoreProtectionIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __IGNOREPROTECTION_ISSET_ID, value);
+  }
+
+  public EnvironmentContext getEnvironmentContext() {
+    return this.environmentContext;
+  }
+
+  public void setEnvironmentContext(EnvironmentContext environmentContext) {
+    this.environmentContext = environmentContext;
+  }
+
+  public void unsetEnvironmentContext() {
+    this.environmentContext = null;
+  }
+
+  /** Returns true if field environmentContext is set (has been assigned a value) and false otherwise */
+  public boolean isSetEnvironmentContext() {
+    return this.environmentContext != null;
+  }
+
+  public void setEnvironmentContextIsSet(boolean value) {
+    if (!value) {
+      this.environmentContext = null;
+    }
+  }
+
+  public boolean isNeedResult() {
+    return this.needResult;
+  }
+
+  public void setNeedResult(boolean needResult) {
+    this.needResult = needResult;
+    setNeedResultIsSet(true);
+  }
+
+  public void unsetNeedResult() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NEEDRESULT_ISSET_ID);
+  }
+
+  /** Returns true if field needResult is set (has been assigned a value) and false otherwise */
+  public boolean isSetNeedResult() {
+    return EncodingUtils.testBit(__isset_bitfield, __NEEDRESULT_ISSET_ID);
+  }
+
+  public void setNeedResultIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDRESULT_ISSET_ID, value);
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTblName();
+      } else {
+        setTblName((String)value);
+      }
+      break;
+
+    case PARTS:
+      if (value == null) {
+        unsetParts();
+      } else {
+        setParts((RequestPartsSpec)value);
+      }
+      break;
+
+    case DELETE_DATA:
+      if (value == null) {
+        unsetDeleteData();
+      } else {
+        setDeleteData((Boolean)value);
+      }
+      break;
+
+    case IF_EXISTS:
+      if (value == null) {
+        unsetIfExists();
+      } else {
+        setIfExists((Boolean)value);
+      }
+      break;
+
+    case IGNORE_PROTECTION:
+      if (value == null) {
+        unsetIgnoreProtection();
+      } else {
+        setIgnoreProtection((Boolean)value);
+      }
+      break;
+
+    case ENVIRONMENT_CONTEXT:
+      if (value == null) {
+        unsetEnvironmentContext();
+      } else {
+        setEnvironmentContext((EnvironmentContext)value);
+      }
+      break;
+
+    case NEED_RESULT:
+      if (value == null) {
+        unsetNeedResult();
+      } else {
+        setNeedResult((Boolean)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TBL_NAME:
+      return getTblName();
+
+    case PARTS:
+      return getParts();
+
+    case DELETE_DATA:
+      return isDeleteData();
+
+    case IF_EXISTS:
+      return isIfExists();
+
+    case IGNORE_PROTECTION:
+      return isIgnoreProtection();
+
+    case ENVIRONMENT_CONTEXT:
+      return getEnvironmentContext();
+
+    case NEED_RESULT:
+      return isNeedResult();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TBL_NAME:
+      return isSetTblName();
+    case PARTS:
+      return isSetParts();
+    case DELETE_DATA:
+      return isSetDeleteData();
+    case IF_EXISTS:
+      return isSetIfExists();
+    case IGNORE_PROTECTION:
+      return isSetIgnoreProtection();
+    case ENVIRONMENT_CONTEXT:
+      return isSetEnvironmentContext();
+    case NEED_RESULT:
+      return isSetNeedResult();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DropPartitionsRequest)
+      return this.equals((DropPartitionsRequest)that);
+    return false;
+  }
+
+  public boolean equals(DropPartitionsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tblName = true && this.isSetTblName();
+    boolean that_present_tblName = true && that.isSetTblName();
+    if (this_present_tblName || that_present_tblName) {
+      if (!(this_present_tblName && that_present_tblName))
+        return false;
+      if (!this.tblName.equals(that.tblName))
+        return false;
+    }
+
+    boolean this_present_parts = true && this.isSetParts();
+    boolean that_present_parts = true && that.isSetParts();
+    if (this_present_parts || that_present_parts) {
+      if (!(this_present_parts && that_present_parts))
+        return false;
+      if (!this.parts.equals(that.parts))
+        return false;
+    }
+
+    boolean this_present_deleteData = true && this.isSetDeleteData();
+    boolean that_present_deleteData = true && that.isSetDeleteData();
+    if (this_present_deleteData || that_present_deleteData) {
+      if (!(this_present_deleteData && that_present_deleteData))
+        return false;
+      if (this.deleteData != that.deleteData)
+        return false;
+    }
+
+    boolean this_present_ifExists = true && this.isSetIfExists();
+    boolean that_present_ifExists = true && that.isSetIfExists();
+    if (this_present_ifExists || that_present_ifExists) {
+      if (!(this_present_ifExists && that_present_ifExists))
+        return false;
+      if (this.ifExists != that.ifExists)
+        return false;
+    }
+
+    boolean this_present_ignoreProtection = true && this.isSetIgnoreProtection();
+    boolean that_present_ignoreProtection = true && that.isSetIgnoreProtection();
+    if (this_present_ignoreProtection || that_present_ignoreProtection) {
+      if (!(this_present_ignoreProtection && that_present_ignoreProtection))
+        return false;
+      if (this.ignoreProtection != that.ignoreProtection)
+        return false;
+    }
+
+    boolean this_present_environmentContext = true && this.isSetEnvironmentContext();
+    boolean that_present_environmentContext = true && that.isSetEnvironmentContext();
+    if (this_present_environmentContext || that_present_environmentContext) {
+      if (!(this_present_environmentContext && that_present_environmentContext))
+        return false;
+      if (!this.environmentContext.equals(that.environmentContext))
+        return false;
+    }
+
+    boolean this_present_needResult = true && this.isSetNeedResult();
+    boolean that_present_needResult = true && that.isSetNeedResult();
+    if (this_present_needResult || that_present_needResult) {
+      if (!(this_present_needResult && that_present_needResult))
+        return false;
+      if (this.needResult != that.needResult)
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tblName = true && (isSetTblName());
+    list.add(present_tblName);
+    if (present_tblName)
+      list.add(tblName);
+
+    boolean present_parts = true && (isSetParts());
+    list.add(present_parts);
+    if (present_parts)
+      list.add(parts);
+
+    boolean present_deleteData = true && (isSetDeleteData());
+    list.add(present_deleteData);
+    if (present_deleteData)
+      list.add(deleteData);
+
+    boolean present_ifExists = true && (isSetIfExists());
+    list.add(present_ifExists);
+    if (present_ifExists)
+      list.add(ifExists);
+
+    boolean present_ignoreProtection = true && (isSetIgnoreProtection());
+    list.add(present_ignoreProtection);
+    if (present_ignoreProtection)
+      list.add(ignoreProtection);
+
+    boolean present_environmentContext = true && (isSetEnvironmentContext());
+    list.add(present_environmentContext);
+    if (present_environmentContext)
+      list.add(environmentContext);
+
+    boolean present_needResult = true && (isSetNeedResult());
+    list.add(present_needResult);
+    if (present_needResult)
+      list.add(needResult);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DropPartitionsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetParts()).compareTo(other.isSetParts());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetParts()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parts, other.parts);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDeleteData()).compareTo(other.isSetDeleteData());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDeleteData()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deleteData, other.deleteData);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetIfExists()).compareTo(other.isSetIfExists());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIfExists()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ifExists, other.ifExists);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetIgnoreProtection()).compareTo(other.isSetIgnoreProtection());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIgnoreProtection()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ignoreProtection, other.ignoreProtection);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetEnvironmentContext()).compareTo(other.isSetEnvironmentContext());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEnvironmentContext()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.environmentContext, other.environmentContext);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNeedResult()).compareTo(other.isSetNeedResult());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNeedResult()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.needResult, other.needResult);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DropPartitionsRequest(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tblName:");
+    if (this.tblName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tblName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("parts:");
+    if (this.parts == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.parts);
+    }
+    first = false;
+    if (isSetDeleteData()) {
+      if (!first) sb.append(", ");
+      sb.append("deleteData:");
+      sb.append(this.deleteData);
+      first = false;
+    }
+    if (isSetIfExists()) {
+      if (!first) sb.append(", ");
+      sb.append("ifExists:");
+      sb.append(this.ifExists);
+      first = false;
+    }
+    if (isSetIgnoreProtection()) {
+      if (!first) sb.append(", ");
+      sb.append("ignoreProtection:");
+      sb.append(this.ignoreProtection);
+      first = false;
+    }
+    if (isSetEnvironmentContext()) {
+      if (!first) sb.append(", ");
+      sb.append("environmentContext:");
+      if (this.environmentContext == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.environmentContext);
+      }
+      first = false;
+    }
+    if (isSetNeedResult()) {
+      if (!first) sb.append(", ");
+      sb.append("needResult:");
+      sb.append(this.needResult);
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTblName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetParts()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'parts' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (environmentContext != null) {
+      environmentContext.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java deserialization bypasses the default constructor, so the isset bitfield must be reset by hand before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DropPartitionsRequestStandardSchemeFactory implements SchemeFactory {
+    public DropPartitionsRequestStandardScheme getScheme() {
+      return new DropPartitionsRequestStandardScheme();
+    }
+  }
+
+  private static class DropPartitionsRequestStandardScheme extends StandardScheme<DropPartitionsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tblName = iprot.readString();
+              struct.setTblNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PARTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.parts = new RequestPartsSpec();
+              struct.parts.read(iprot);
+              struct.setPartsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // DELETE_DATA
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.deleteData = iprot.readBool();
+              struct.setDeleteDataIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // IF_EXISTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.ifExists = iprot.readBool();
+              struct.setIfExistsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // IGNORE_PROTECTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.ignoreProtection = iprot.readBool();
+              struct.setIgnoreProtectionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // ENVIRONMENT_CONTEXT
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.environmentContext = new EnvironmentContext();
+              struct.environmentContext.read(iprot);
+              struct.setEnvironmentContextIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // NEED_RESULT
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.needResult = iprot.readBool();
+              struct.setNeedResultIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblName != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tblName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.parts != null) {
+        oprot.writeFieldBegin(PARTS_FIELD_DESC);
+        struct.parts.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetDeleteData()) {
+        oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC);
+        oprot.writeBool(struct.deleteData);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetIfExists()) {
+        oprot.writeFieldBegin(IF_EXISTS_FIELD_DESC);
+        oprot.writeBool(struct.ifExists);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetIgnoreProtection()) {
+        oprot.writeFieldBegin(IGNORE_PROTECTION_FIELD_DESC);
+        oprot.writeBool(struct.ignoreProtection);
+        oprot.writeFieldEnd();
+      }
+      if (struct.environmentContext != null) {
+        if (struct.isSetEnvironmentContext()) {
+          oprot.writeFieldBegin(ENVIRONMENT_CONTEXT_FIELD_DESC);
+          struct.environmentContext.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetNeedResult()) {
+        oprot.writeFieldBegin(NEED_RESULT_FIELD_DESC);
+        oprot.writeBool(struct.needResult);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DropPartitionsRequestTupleSchemeFactory implements SchemeFactory {
+    public DropPartitionsRequestTupleScheme getScheme() {
+      return new DropPartitionsRequestTupleScheme();
+    }
+  }
+
+  private static class DropPartitionsRequestTupleScheme extends TupleScheme<DropPartitionsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tblName);
+      struct.parts.write(oprot);
+      BitSet optionals = new BitSet();
+      if (struct.isSetDeleteData()) {
+        optionals.set(0);
+      }
+      if (struct.isSetIfExists()) {
+        optionals.set(1);
+      }
+      if (struct.isSetIgnoreProtection()) {
+        optionals.set(2);
+      }
+      if (struct.isSetEnvironmentContext()) {
+        optionals.set(3);
+      }
+      if (struct.isSetNeedResult()) {
+        optionals.set(4);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(5);
+      }
+      oprot.writeBitSet(optionals, 6);
+      if (struct.isSetDeleteData()) {
+        oprot.writeBool(struct.deleteData);
+      }
+      if (struct.isSetIfExists()) {
+        oprot.writeBool(struct.ifExists);
+      }
+      if (struct.isSetIgnoreProtection()) {
+        oprot.writeBool(struct.ignoreProtection);
+      }
+      if (struct.isSetEnvironmentContext()) {
+        struct.environmentContext.write(oprot);
+      }
+      if (struct.isSetNeedResult()) {
+        oprot.writeBool(struct.needResult);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tblName = iprot.readString();
+      struct.setTblNameIsSet(true);
+      struct.parts = new RequestPartsSpec();
+      struct.parts.read(iprot);
+      struct.setPartsIsSet(true);
+      BitSet incoming = iprot.readBitSet(6);
+      if (incoming.get(0)) {
+        struct.deleteData = iprot.readBool();
+        struct.setDeleteDataIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.ifExists = iprot.readBool();
+        struct.setIfExistsIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.ignoreProtection = iprot.readBool();
+        struct.setIgnoreProtectionIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.environmentContext = new EnvironmentContext();
+        struct.environmentContext.read(iprot);
+        struct.setEnvironmentContextIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.needResult = iprot.readBool();
+        struct.setNeedResultIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
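
A similar hedged sketch for DropPartitionsRequest, showing the three required fields and how the constructor defaults interact with isset tracking (the RequestPartsSpec value and the db/table names are illustrative; everything outside the generated API is an assumption):

    import org.apache.thrift.TException;
    import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest;
    import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;

    public class DropPartitionsRequestDemo {
      // 'parts' is assumed to be a RequestPartsSpec populated by the caller.
      static DropPartitionsRequest build(RequestPartsSpec parts) throws TException {
        DropPartitionsRequest req = new DropPartitionsRequest("default", "sales", parts);

        // The no-arg constructor (reached via this()) pre-assigns ifExists = true and
        // needResult = true by direct field write, so their isset bits stay clear:
        // the defaults are visible to callers but are not serialized until a setter
        // marks the field as set.
        boolean defaulted = req.isIfExists() && !req.isSetIfExists();  // true

        req.setDeleteData(true);   // optional bool; isSetDeleteData() is now true
        req.setIfExists(false);    // overrides the default and flips its isset bit
        req.validate();            // rejects the request if dbName/tblName/parts are unset
        return req;
      }
    }

On the tuple protocol the required dbName, tblName and parts are written unconditionally, while the six optional fields are announced up front in a single BitSet (writeBitSet(optionals, 6)) and only the set ones follow, which keeps defaulted fields off the wire entirely.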


[02/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java
new file mode 100644
index 0000000..a7cf241
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java
@@ -0,0 +1,1822 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLForeignKey implements org.apache.thrift.TBase<SQLForeignKey, SQLForeignKey._Fields>, java.io.Serializable, Cloneable, Comparable<SQLForeignKey> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLForeignKey");
+
+  private static final org.apache.thrift.protocol.TField PKTABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("pktable_db", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField PKTABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("pktable_name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PKCOLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("pkcolumn_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField FKTABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("fktable_db", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField FKTABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("fktable_name", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField FKCOLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("fkcolumn_name", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField KEY_SEQ_FIELD_DESC = new org.apache.thrift.protocol.TField("key_seq", org.apache.thrift.protocol.TType.I32, (short)7);
+  private static final org.apache.thrift.protocol.TField UPDATE_RULE_FIELD_DESC = new org.apache.thrift.protocol.TField("update_rule", org.apache.thrift.protocol.TType.I32, (short)8);
+  private static final org.apache.thrift.protocol.TField DELETE_RULE_FIELD_DESC = new org.apache.thrift.protocol.TField("delete_rule", org.apache.thrift.protocol.TType.I32, (short)9);
+  private static final org.apache.thrift.protocol.TField FK_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("fk_name", org.apache.thrift.protocol.TType.STRING, (short)10);
+  private static final org.apache.thrift.protocol.TField PK_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("pk_name", org.apache.thrift.protocol.TType.STRING, (short)11);
+  private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)12);
+  private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)13);
+  private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)14);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)15);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new SQLForeignKeyStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new SQLForeignKeyTupleSchemeFactory());
+  }
+
+  private String pktable_db; // required
+  private String pktable_name; // required
+  private String pkcolumn_name; // required
+  private String fktable_db; // required
+  private String fktable_name; // required
+  private String fkcolumn_name; // required
+  private int key_seq; // required
+  private int update_rule; // required
+  private int delete_rule; // required
+  private String fk_name; // required
+  private String pk_name; // required
+  private boolean enable_cstr; // required
+  private boolean validate_cstr; // required
+  private boolean rely_cstr; // required
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PKTABLE_DB((short)1, "pktable_db"),
+    PKTABLE_NAME((short)2, "pktable_name"),
+    PKCOLUMN_NAME((short)3, "pkcolumn_name"),
+    FKTABLE_DB((short)4, "fktable_db"),
+    FKTABLE_NAME((short)5, "fktable_name"),
+    FKCOLUMN_NAME((short)6, "fkcolumn_name"),
+    KEY_SEQ((short)7, "key_seq"),
+    UPDATE_RULE((short)8, "update_rule"),
+    DELETE_RULE((short)9, "delete_rule"),
+    FK_NAME((short)10, "fk_name"),
+    PK_NAME((short)11, "pk_name"),
+    ENABLE_CSTR((short)12, "enable_cstr"),
+    VALIDATE_CSTR((short)13, "validate_cstr"),
+    RELY_CSTR((short)14, "rely_cstr"),
+    CAT_NAME((short)15, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PKTABLE_DB
+          return PKTABLE_DB;
+        case 2: // PKTABLE_NAME
+          return PKTABLE_NAME;
+        case 3: // PKCOLUMN_NAME
+          return PKCOLUMN_NAME;
+        case 4: // FKTABLE_DB
+          return FKTABLE_DB;
+        case 5: // FKTABLE_NAME
+          return FKTABLE_NAME;
+        case 6: // FKCOLUMN_NAME
+          return FKCOLUMN_NAME;
+        case 7: // KEY_SEQ
+          return KEY_SEQ;
+        case 8: // UPDATE_RULE
+          return UPDATE_RULE;
+        case 9: // DELETE_RULE
+          return DELETE_RULE;
+        case 10: // FK_NAME
+          return FK_NAME;
+        case 11: // PK_NAME
+          return PK_NAME;
+        case 12: // ENABLE_CSTR
+          return ENABLE_CSTR;
+        case 13: // VALIDATE_CSTR
+          return VALIDATE_CSTR;
+        case 14: // RELY_CSTR
+          return RELY_CSTR;
+        case 15: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __KEY_SEQ_ISSET_ID = 0;
+  private static final int __UPDATE_RULE_ISSET_ID = 1;
+  private static final int __DELETE_RULE_ISSET_ID = 2;
+  private static final int __ENABLE_CSTR_ISSET_ID = 3;
+  private static final int __VALIDATE_CSTR_ISSET_ID = 4;
+  private static final int __RELY_CSTR_ISSET_ID = 5;
+  private byte __isset_bitfield = 0;
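+  // Primitive fields (the i32 and bool ones above) cannot use null as an "unset" marker,
+  // so each is assigned one bit in __isset_bitfield; the unset/isSet helpers below test
+  // and toggle those bits through EncodingUtils.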
+  private static final _Fields optionals[] = {_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PKTABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("pktable_db", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PKTABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("pktable_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PKCOLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("pkcolumn_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.FKTABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("fktable_db", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.FKTABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("fktable_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.FKCOLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("fkcolumn_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.KEY_SEQ, new org.apache.thrift.meta_data.FieldMetaData("key_seq", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.UPDATE_RULE, new org.apache.thrift.meta_data.FieldMetaData("update_rule", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.DELETE_RULE, new org.apache.thrift.meta_data.FieldMetaData("delete_rule", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.FK_NAME, new org.apache.thrift.meta_data.FieldMetaData("fk_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PK_NAME, new org.apache.thrift.meta_data.FieldMetaData("pk_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ENABLE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("enable_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.VALIDATE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("validate_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.RELY_CSTR, new org.apache.thrift.meta_data.FieldMetaData("rely_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLForeignKey.class, metaDataMap);
+  }
+
+  public SQLForeignKey() {
+  }
+
+  public SQLForeignKey(
+    String pktable_db,
+    String pktable_name,
+    String pkcolumn_name,
+    String fktable_db,
+    String fktable_name,
+    String fkcolumn_name,
+    int key_seq,
+    int update_rule,
+    int delete_rule,
+    String fk_name,
+    String pk_name,
+    boolean enable_cstr,
+    boolean validate_cstr,
+    boolean rely_cstr)
+  {
+    this();
+    this.pktable_db = pktable_db;
+    this.pktable_name = pktable_name;
+    this.pkcolumn_name = pkcolumn_name;
+    this.fktable_db = fktable_db;
+    this.fktable_name = fktable_name;
+    this.fkcolumn_name = fkcolumn_name;
+    this.key_seq = key_seq;
+    setKey_seqIsSet(true);
+    this.update_rule = update_rule;
+    setUpdate_ruleIsSet(true);
+    this.delete_rule = delete_rule;
+    setDelete_ruleIsSet(true);
+    this.fk_name = fk_name;
+    this.pk_name = pk_name;
+    this.enable_cstr = enable_cstr;
+    setEnable_cstrIsSet(true);
+    this.validate_cstr = validate_cstr;
+    setValidate_cstrIsSet(true);
+    this.rely_cstr = rely_cstr;
+    setRely_cstrIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public SQLForeignKey(SQLForeignKey other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetPktable_db()) {
+      this.pktable_db = other.pktable_db;
+    }
+    if (other.isSetPktable_name()) {
+      this.pktable_name = other.pktable_name;
+    }
+    if (other.isSetPkcolumn_name()) {
+      this.pkcolumn_name = other.pkcolumn_name;
+    }
+    if (other.isSetFktable_db()) {
+      this.fktable_db = other.fktable_db;
+    }
+    if (other.isSetFktable_name()) {
+      this.fktable_name = other.fktable_name;
+    }
+    if (other.isSetFkcolumn_name()) {
+      this.fkcolumn_name = other.fkcolumn_name;
+    }
+    this.key_seq = other.key_seq;
+    this.update_rule = other.update_rule;
+    this.delete_rule = other.delete_rule;
+    if (other.isSetFk_name()) {
+      this.fk_name = other.fk_name;
+    }
+    if (other.isSetPk_name()) {
+      this.pk_name = other.pk_name;
+    }
+    this.enable_cstr = other.enable_cstr;
+    this.validate_cstr = other.validate_cstr;
+    this.rely_cstr = other.rely_cstr;
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public SQLForeignKey deepCopy() {
+    return new SQLForeignKey(this);
+  }
+
+  @Override
+  public void clear() {
+    this.pktable_db = null;
+    this.pktable_name = null;
+    this.pkcolumn_name = null;
+    this.fktable_db = null;
+    this.fktable_name = null;
+    this.fkcolumn_name = null;
+    setKey_seqIsSet(false);
+    this.key_seq = 0;
+    setUpdate_ruleIsSet(false);
+    this.update_rule = 0;
+    setDelete_ruleIsSet(false);
+    this.delete_rule = 0;
+    this.fk_name = null;
+    this.pk_name = null;
+    setEnable_cstrIsSet(false);
+    this.enable_cstr = false;
+    setValidate_cstrIsSet(false);
+    this.validate_cstr = false;
+    setRely_cstrIsSet(false);
+    this.rely_cstr = false;
+    this.catName = null;
+  }
+
+  public String getPktable_db() {
+    return this.pktable_db;
+  }
+
+  public void setPktable_db(String pktable_db) {
+    this.pktable_db = pktable_db;
+  }
+
+  public void unsetPktable_db() {
+    this.pktable_db = null;
+  }
+
+  /** Returns true if field pktable_db is set (has been assigned a value) and false otherwise */
+  public boolean isSetPktable_db() {
+    return this.pktable_db != null;
+  }
+
+  public void setPktable_dbIsSet(boolean value) {
+    if (!value) {
+      this.pktable_db = null;
+    }
+  }
+
+  public String getPktable_name() {
+    return this.pktable_name;
+  }
+
+  public void setPktable_name(String pktable_name) {
+    this.pktable_name = pktable_name;
+  }
+
+  public void unsetPktable_name() {
+    this.pktable_name = null;
+  }
+
+  /** Returns true if field pktable_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetPktable_name() {
+    return this.pktable_name != null;
+  }
+
+  public void setPktable_nameIsSet(boolean value) {
+    if (!value) {
+      this.pktable_name = null;
+    }
+  }
+
+  public String getPkcolumn_name() {
+    return this.pkcolumn_name;
+  }
+
+  public void setPkcolumn_name(String pkcolumn_name) {
+    this.pkcolumn_name = pkcolumn_name;
+  }
+
+  public void unsetPkcolumn_name() {
+    this.pkcolumn_name = null;
+  }
+
+  /** Returns true if field pkcolumn_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetPkcolumn_name() {
+    return this.pkcolumn_name != null;
+  }
+
+  public void setPkcolumn_nameIsSet(boolean value) {
+    if (!value) {
+      this.pkcolumn_name = null;
+    }
+  }
+
+  public String getFktable_db() {
+    return this.fktable_db;
+  }
+
+  public void setFktable_db(String fktable_db) {
+    this.fktable_db = fktable_db;
+  }
+
+  public void unsetFktable_db() {
+    this.fktable_db = null;
+  }
+
+  /** Returns true if field fktable_db is set (has been assigned a value) and false otherwise */
+  public boolean isSetFktable_db() {
+    return this.fktable_db != null;
+  }
+
+  public void setFktable_dbIsSet(boolean value) {
+    if (!value) {
+      this.fktable_db = null;
+    }
+  }
+
+  public String getFktable_name() {
+    return this.fktable_name;
+  }
+
+  public void setFktable_name(String fktable_name) {
+    this.fktable_name = fktable_name;
+  }
+
+  public void unsetFktable_name() {
+    this.fktable_name = null;
+  }
+
+  /** Returns true if field fktable_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetFktable_name() {
+    return this.fktable_name != null;
+  }
+
+  public void setFktable_nameIsSet(boolean value) {
+    if (!value) {
+      this.fktable_name = null;
+    }
+  }
+
+  public String getFkcolumn_name() {
+    return this.fkcolumn_name;
+  }
+
+  public void setFkcolumn_name(String fkcolumn_name) {
+    this.fkcolumn_name = fkcolumn_name;
+  }
+
+  public void unsetFkcolumn_name() {
+    this.fkcolumn_name = null;
+  }
+
+  /** Returns true if field fkcolumn_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetFkcolumn_name() {
+    return this.fkcolumn_name != null;
+  }
+
+  public void setFkcolumn_nameIsSet(boolean value) {
+    if (!value) {
+      this.fkcolumn_name = null;
+    }
+  }
+
+  public int getKey_seq() {
+    return this.key_seq;
+  }
+
+  public void setKey_seq(int key_seq) {
+    this.key_seq = key_seq;
+    setKey_seqIsSet(true);
+  }
+
+  public void unsetKey_seq() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __KEY_SEQ_ISSET_ID);
+  }
+
+  /** Returns true if field key_seq is set (has been assigned a value) and false otherwise */
+  public boolean isSetKey_seq() {
+    return EncodingUtils.testBit(__isset_bitfield, __KEY_SEQ_ISSET_ID);
+  }
+
+  public void setKey_seqIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __KEY_SEQ_ISSET_ID, value);
+  }
+
+  public int getUpdate_rule() {
+    return this.update_rule;
+  }
+
+  public void setUpdate_rule(int update_rule) {
+    this.update_rule = update_rule;
+    setUpdate_ruleIsSet(true);
+  }
+
+  public void unsetUpdate_rule() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPDATE_RULE_ISSET_ID);
+  }
+
+  /** Returns true if field update_rule is set (has been assigned a value) and false otherwise */
+  public boolean isSetUpdate_rule() {
+    return EncodingUtils.testBit(__isset_bitfield, __UPDATE_RULE_ISSET_ID);
+  }
+
+  public void setUpdate_ruleIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPDATE_RULE_ISSET_ID, value);
+  }
+
+  public int getDelete_rule() {
+    return this.delete_rule;
+  }
+
+  public void setDelete_rule(int delete_rule) {
+    this.delete_rule = delete_rule;
+    setDelete_ruleIsSet(true);
+  }
+
+  public void unsetDelete_rule() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DELETE_RULE_ISSET_ID);
+  }
+
+  /** Returns true if field delete_rule is set (has been assigned a value) and false otherwise */
+  public boolean isSetDelete_rule() {
+    return EncodingUtils.testBit(__isset_bitfield, __DELETE_RULE_ISSET_ID);
+  }
+
+  public void setDelete_ruleIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETE_RULE_ISSET_ID, value);
+  }
+
+  public String getFk_name() {
+    return this.fk_name;
+  }
+
+  public void setFk_name(String fk_name) {
+    this.fk_name = fk_name;
+  }
+
+  public void unsetFk_name() {
+    this.fk_name = null;
+  }
+
+  /** Returns true if field fk_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetFk_name() {
+    return this.fk_name != null;
+  }
+
+  public void setFk_nameIsSet(boolean value) {
+    if (!value) {
+      this.fk_name = null;
+    }
+  }
+
+  public String getPk_name() {
+    return this.pk_name;
+  }
+
+  public void setPk_name(String pk_name) {
+    this.pk_name = pk_name;
+  }
+
+  public void unsetPk_name() {
+    this.pk_name = null;
+  }
+
+  /** Returns true if field pk_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetPk_name() {
+    return this.pk_name != null;
+  }
+
+  public void setPk_nameIsSet(boolean value) {
+    if (!value) {
+      this.pk_name = null;
+    }
+  }
+
+  public boolean isEnable_cstr() {
+    return this.enable_cstr;
+  }
+
+  public void setEnable_cstr(boolean enable_cstr) {
+    this.enable_cstr = enable_cstr;
+    setEnable_cstrIsSet(true);
+  }
+
+  public void unsetEnable_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field enable_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetEnable_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID);
+  }
+
+  public void setEnable_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID, value);
+  }
+
+  public boolean isValidate_cstr() {
+    return this.validate_cstr;
+  }
+
+  public void setValidate_cstr(boolean validate_cstr) {
+    this.validate_cstr = validate_cstr;
+    setValidate_cstrIsSet(true);
+  }
+
+  public void unsetValidate_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field validate_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidate_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID);
+  }
+
+  public void setValidate_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID, value);
+  }
+
+  public boolean isRely_cstr() {
+    return this.rely_cstr;
+  }
+
+  public void setRely_cstr(boolean rely_cstr) {
+    this.rely_cstr = rely_cstr;
+    setRely_cstrIsSet(true);
+  }
+
+  public void unsetRely_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RELY_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field rely_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetRely_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __RELY_CSTR_ISSET_ID);
+  }
+
+  public void setRely_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RELY_CSTR_ISSET_ID, value);
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PKTABLE_DB:
+      if (value == null) {
+        unsetPktable_db();
+      } else {
+        setPktable_db((String)value);
+      }
+      break;
+
+    case PKTABLE_NAME:
+      if (value == null) {
+        unsetPktable_name();
+      } else {
+        setPktable_name((String)value);
+      }
+      break;
+
+    case PKCOLUMN_NAME:
+      if (value == null) {
+        unsetPkcolumn_name();
+      } else {
+        setPkcolumn_name((String)value);
+      }
+      break;
+
+    case FKTABLE_DB:
+      if (value == null) {
+        unsetFktable_db();
+      } else {
+        setFktable_db((String)value);
+      }
+      break;
+
+    case FKTABLE_NAME:
+      if (value == null) {
+        unsetFktable_name();
+      } else {
+        setFktable_name((String)value);
+      }
+      break;
+
+    case FKCOLUMN_NAME:
+      if (value == null) {
+        unsetFkcolumn_name();
+      } else {
+        setFkcolumn_name((String)value);
+      }
+      break;
+
+    case KEY_SEQ:
+      if (value == null) {
+        unsetKey_seq();
+      } else {
+        setKey_seq((Integer)value);
+      }
+      break;
+
+    case UPDATE_RULE:
+      if (value == null) {
+        unsetUpdate_rule();
+      } else {
+        setUpdate_rule((Integer)value);
+      }
+      break;
+
+    case DELETE_RULE:
+      if (value == null) {
+        unsetDelete_rule();
+      } else {
+        setDelete_rule((Integer)value);
+      }
+      break;
+
+    case FK_NAME:
+      if (value == null) {
+        unsetFk_name();
+      } else {
+        setFk_name((String)value);
+      }
+      break;
+
+    case PK_NAME:
+      if (value == null) {
+        unsetPk_name();
+      } else {
+        setPk_name((String)value);
+      }
+      break;
+
+    case ENABLE_CSTR:
+      if (value == null) {
+        unsetEnable_cstr();
+      } else {
+        setEnable_cstr((Boolean)value);
+      }
+      break;
+
+    case VALIDATE_CSTR:
+      if (value == null) {
+        unsetValidate_cstr();
+      } else {
+        setValidate_cstr((Boolean)value);
+      }
+      break;
+
+    case RELY_CSTR:
+      if (value == null) {
+        unsetRely_cstr();
+      } else {
+        setRely_cstr((Boolean)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PKTABLE_DB:
+      return getPktable_db();
+
+    case PKTABLE_NAME:
+      return getPktable_name();
+
+    case PKCOLUMN_NAME:
+      return getPkcolumn_name();
+
+    case FKTABLE_DB:
+      return getFktable_db();
+
+    case FKTABLE_NAME:
+      return getFktable_name();
+
+    case FKCOLUMN_NAME:
+      return getFkcolumn_name();
+
+    case KEY_SEQ:
+      return getKey_seq();
+
+    case UPDATE_RULE:
+      return getUpdate_rule();
+
+    case DELETE_RULE:
+      return getDelete_rule();
+
+    case FK_NAME:
+      return getFk_name();
+
+    case PK_NAME:
+      return getPk_name();
+
+    case ENABLE_CSTR:
+      return isEnable_cstr();
+
+    case VALIDATE_CSTR:
+      return isValidate_cstr();
+
+    case RELY_CSTR:
+      return isRely_cstr();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PKTABLE_DB:
+      return isSetPktable_db();
+    case PKTABLE_NAME:
+      return isSetPktable_name();
+    case PKCOLUMN_NAME:
+      return isSetPkcolumn_name();
+    case FKTABLE_DB:
+      return isSetFktable_db();
+    case FKTABLE_NAME:
+      return isSetFktable_name();
+    case FKCOLUMN_NAME:
+      return isSetFkcolumn_name();
+    case KEY_SEQ:
+      return isSetKey_seq();
+    case UPDATE_RULE:
+      return isSetUpdate_rule();
+    case DELETE_RULE:
+      return isSetDelete_rule();
+    case FK_NAME:
+      return isSetFk_name();
+    case PK_NAME:
+      return isSetPk_name();
+    case ENABLE_CSTR:
+      return isSetEnable_cstr();
+    case VALIDATE_CSTR:
+      return isSetValidate_cstr();
+    case RELY_CSTR:
+      return isSetRely_cstr();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof SQLForeignKey)
+      return this.equals((SQLForeignKey)that);
+    return false;
+  }
+
+  public boolean equals(SQLForeignKey that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_pktable_db = true && this.isSetPktable_db();
+    boolean that_present_pktable_db = true && that.isSetPktable_db();
+    if (this_present_pktable_db || that_present_pktable_db) {
+      if (!(this_present_pktable_db && that_present_pktable_db))
+        return false;
+      if (!this.pktable_db.equals(that.pktable_db))
+        return false;
+    }
+
+    boolean this_present_pktable_name = true && this.isSetPktable_name();
+    boolean that_present_pktable_name = true && that.isSetPktable_name();
+    if (this_present_pktable_name || that_present_pktable_name) {
+      if (!(this_present_pktable_name && that_present_pktable_name))
+        return false;
+      if (!this.pktable_name.equals(that.pktable_name))
+        return false;
+    }
+
+    boolean this_present_pkcolumn_name = true && this.isSetPkcolumn_name();
+    boolean that_present_pkcolumn_name = true && that.isSetPkcolumn_name();
+    if (this_present_pkcolumn_name || that_present_pkcolumn_name) {
+      if (!(this_present_pkcolumn_name && that_present_pkcolumn_name))
+        return false;
+      if (!this.pkcolumn_name.equals(that.pkcolumn_name))
+        return false;
+    }
+
+    boolean this_present_fktable_db = true && this.isSetFktable_db();
+    boolean that_present_fktable_db = true && that.isSetFktable_db();
+    if (this_present_fktable_db || that_present_fktable_db) {
+      if (!(this_present_fktable_db && that_present_fktable_db))
+        return false;
+      if (!this.fktable_db.equals(that.fktable_db))
+        return false;
+    }
+
+    boolean this_present_fktable_name = true && this.isSetFktable_name();
+    boolean that_present_fktable_name = true && that.isSetFktable_name();
+    if (this_present_fktable_name || that_present_fktable_name) {
+      if (!(this_present_fktable_name && that_present_fktable_name))
+        return false;
+      if (!this.fktable_name.equals(that.fktable_name))
+        return false;
+    }
+
+    boolean this_present_fkcolumn_name = true && this.isSetFkcolumn_name();
+    boolean that_present_fkcolumn_name = true && that.isSetFkcolumn_name();
+    if (this_present_fkcolumn_name || that_present_fkcolumn_name) {
+      if (!(this_present_fkcolumn_name && that_present_fkcolumn_name))
+        return false;
+      if (!this.fkcolumn_name.equals(that.fkcolumn_name))
+        return false;
+    }
+
+    boolean this_present_key_seq = true;
+    boolean that_present_key_seq = true;
+    if (this_present_key_seq || that_present_key_seq) {
+      if (!(this_present_key_seq && that_present_key_seq))
+        return false;
+      if (this.key_seq != that.key_seq)
+        return false;
+    }
+
+    boolean this_present_update_rule = true;
+    boolean that_present_update_rule = true;
+    if (this_present_update_rule || that_present_update_rule) {
+      if (!(this_present_update_rule && that_present_update_rule))
+        return false;
+      if (this.update_rule != that.update_rule)
+        return false;
+    }
+
+    boolean this_present_delete_rule = true;
+    boolean that_present_delete_rule = true;
+    if (this_present_delete_rule || that_present_delete_rule) {
+      if (!(this_present_delete_rule && that_present_delete_rule))
+        return false;
+      if (this.delete_rule != that.delete_rule)
+        return false;
+    }
+
+    boolean this_present_fk_name = true && this.isSetFk_name();
+    boolean that_present_fk_name = true && that.isSetFk_name();
+    if (this_present_fk_name || that_present_fk_name) {
+      if (!(this_present_fk_name && that_present_fk_name))
+        return false;
+      if (!this.fk_name.equals(that.fk_name))
+        return false;
+    }
+
+    boolean this_present_pk_name = true && this.isSetPk_name();
+    boolean that_present_pk_name = true && that.isSetPk_name();
+    if (this_present_pk_name || that_present_pk_name) {
+      if (!(this_present_pk_name && that_present_pk_name))
+        return false;
+      if (!this.pk_name.equals(that.pk_name))
+        return false;
+    }
+
+    boolean this_present_enable_cstr = true;
+    boolean that_present_enable_cstr = true;
+    if (this_present_enable_cstr || that_present_enable_cstr) {
+      if (!(this_present_enable_cstr && that_present_enable_cstr))
+        return false;
+      if (this.enable_cstr != that.enable_cstr)
+        return false;
+    }
+
+    boolean this_present_validate_cstr = true;
+    boolean that_present_validate_cstr = true;
+    if (this_present_validate_cstr || that_present_validate_cstr) {
+      if (!(this_present_validate_cstr && that_present_validate_cstr))
+        return false;
+      if (this.validate_cstr != that.validate_cstr)
+        return false;
+    }
+
+    boolean this_present_rely_cstr = true;
+    boolean that_present_rely_cstr = true;
+    if (this_present_rely_cstr || that_present_rely_cstr) {
+      if (!(this_present_rely_cstr && that_present_rely_cstr))
+        return false;
+      if (this.rely_cstr != that.rely_cstr)
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_pktable_db = true && (isSetPktable_db());
+    list.add(present_pktable_db);
+    if (present_pktable_db)
+      list.add(pktable_db);
+
+    boolean present_pktable_name = true && (isSetPktable_name());
+    list.add(present_pktable_name);
+    if (present_pktable_name)
+      list.add(pktable_name);
+
+    boolean present_pkcolumn_name = true && (isSetPkcolumn_name());
+    list.add(present_pkcolumn_name);
+    if (present_pkcolumn_name)
+      list.add(pkcolumn_name);
+
+    boolean present_fktable_db = true && (isSetFktable_db());
+    list.add(present_fktable_db);
+    if (present_fktable_db)
+      list.add(fktable_db);
+
+    boolean present_fktable_name = true && (isSetFktable_name());
+    list.add(present_fktable_name);
+    if (present_fktable_name)
+      list.add(fktable_name);
+
+    boolean present_fkcolumn_name = true && (isSetFkcolumn_name());
+    list.add(present_fkcolumn_name);
+    if (present_fkcolumn_name)
+      list.add(fkcolumn_name);
+
+    boolean present_key_seq = true;
+    list.add(present_key_seq);
+    if (present_key_seq)
+      list.add(key_seq);
+
+    boolean present_update_rule = true;
+    list.add(present_update_rule);
+    if (present_update_rule)
+      list.add(update_rule);
+
+    boolean present_delete_rule = true;
+    list.add(present_delete_rule);
+    if (present_delete_rule)
+      list.add(delete_rule);
+
+    boolean present_fk_name = true && (isSetFk_name());
+    list.add(present_fk_name);
+    if (present_fk_name)
+      list.add(fk_name);
+
+    boolean present_pk_name = true && (isSetPk_name());
+    list.add(present_pk_name);
+    if (present_pk_name)
+      list.add(pk_name);
+
+    boolean present_enable_cstr = true;
+    list.add(present_enable_cstr);
+    if (present_enable_cstr)
+      list.add(enable_cstr);
+
+    boolean present_validate_cstr = true;
+    list.add(present_validate_cstr);
+    if (present_validate_cstr)
+      list.add(validate_cstr);
+
+    boolean present_rely_cstr = true;
+    list.add(present_rely_cstr);
+    if (present_rely_cstr)
+      list.add(rely_cstr);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(SQLForeignKey other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPktable_db()).compareTo(other.isSetPktable_db());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPktable_db()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pktable_db, other.pktable_db);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPktable_name()).compareTo(other.isSetPktable_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPktable_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pktable_name, other.pktable_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPkcolumn_name()).compareTo(other.isSetPkcolumn_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPkcolumn_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pkcolumn_name, other.pkcolumn_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFktable_db()).compareTo(other.isSetFktable_db());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFktable_db()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fktable_db, other.fktable_db);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFktable_name()).compareTo(other.isSetFktable_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFktable_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fktable_name, other.fktable_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFkcolumn_name()).compareTo(other.isSetFkcolumn_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFkcolumn_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fkcolumn_name, other.fkcolumn_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetKey_seq()).compareTo(other.isSetKey_seq());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetKey_seq()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key_seq, other.key_seq);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetUpdate_rule()).compareTo(other.isSetUpdate_rule());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetUpdate_rule()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.update_rule, other.update_rule);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDelete_rule()).compareTo(other.isSetDelete_rule());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDelete_rule()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delete_rule, other.delete_rule);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetFk_name()).compareTo(other.isSetFk_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFk_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fk_name, other.fk_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPk_name()).compareTo(other.isSetPk_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPk_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pk_name, other.pk_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetEnable_cstr()).compareTo(other.isSetEnable_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEnable_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.enable_cstr, other.enable_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidate_cstr()).compareTo(other.isSetValidate_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidate_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validate_cstr, other.validate_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRely_cstr()).compareTo(other.isSetRely_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRely_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rely_cstr, other.rely_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("SQLForeignKey(");
+    boolean first = true;
+
+    sb.append("pktable_db:");
+    if (this.pktable_db == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.pktable_db);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("pktable_name:");
+    if (this.pktable_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.pktable_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("pkcolumn_name:");
+    if (this.pkcolumn_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.pkcolumn_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("fktable_db:");
+    if (this.fktable_db == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fktable_db);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("fktable_name:");
+    if (this.fktable_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fktable_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("fkcolumn_name:");
+    if (this.fkcolumn_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fkcolumn_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("key_seq:");
+    sb.append(this.key_seq);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("update_rule:");
+    sb.append(this.update_rule);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("delete_rule:");
+    sb.append(this.delete_rule);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("fk_name:");
+    if (this.fk_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fk_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("pk_name:");
+    if (this.pk_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.pk_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("enable_cstr:");
+    sb.append(this.enable_cstr);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("validate_cstr:");
+    sb.append(this.validate_cstr);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("rely_cstr:");
+    sb.append(this.rely_cstr);
+    first = false;
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java deserialization bypasses this class's constructors, so reset the isset
+      // bitfield to a known state before reading the stream.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class SQLForeignKeyStandardSchemeFactory implements SchemeFactory {
+    public SQLForeignKeyStandardScheme getScheme() {
+      return new SQLForeignKeyStandardScheme();
+    }
+  }
+
+  private static class SQLForeignKeyStandardScheme extends StandardScheme<SQLForeignKey> {
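+    // The standard scheme mirrors the self-describing wire format (e.g. TBinaryProtocol):
+    // each field arrives with an id/type header, and fields whose type does not match the
+    // IDL are skipped rather than failing the read.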
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SQLForeignKey struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PKTABLE_DB
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.pktable_db = iprot.readString();
+              struct.setPktable_dbIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PKTABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.pktable_name = iprot.readString();
+              struct.setPktable_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PKCOLUMN_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.pkcolumn_name = iprot.readString();
+              struct.setPkcolumn_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // FKTABLE_DB
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.fktable_db = iprot.readString();
+              struct.setFktable_dbIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // FKTABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.fktable_name = iprot.readString();
+              struct.setFktable_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // FKCOLUMN_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.fkcolumn_name = iprot.readString();
+              struct.setFkcolumn_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // KEY_SEQ
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.key_seq = iprot.readI32();
+              struct.setKey_seqIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // UPDATE_RULE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.update_rule = iprot.readI32();
+              struct.setUpdate_ruleIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // DELETE_RULE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.delete_rule = iprot.readI32();
+              struct.setDelete_ruleIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 10: // FK_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.fk_name = iprot.readString();
+              struct.setFk_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 11: // PK_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.pk_name = iprot.readString();
+              struct.setPk_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 12: // ENABLE_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.enable_cstr = iprot.readBool();
+              struct.setEnable_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 13: // VALIDATE_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.validate_cstr = iprot.readBool();
+              struct.setValidate_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 14: // RELY_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.rely_cstr = iprot.readBool();
+              struct.setRely_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 15: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SQLForeignKey struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.pktable_db != null) {
+        oprot.writeFieldBegin(PKTABLE_DB_FIELD_DESC);
+        oprot.writeString(struct.pktable_db);
+        oprot.writeFieldEnd();
+      }
+      if (struct.pktable_name != null) {
+        oprot.writeFieldBegin(PKTABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.pktable_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.pkcolumn_name != null) {
+        oprot.writeFieldBegin(PKCOLUMN_NAME_FIELD_DESC);
+        oprot.writeString(struct.pkcolumn_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.fktable_db != null) {
+        oprot.writeFieldBegin(FKTABLE_DB_FIELD_DESC);
+        oprot.writeString(struct.fktable_db);
+        oprot.writeFieldEnd();
+      }
+      if (struct.fktable_name != null) {
+        oprot.writeFieldBegin(FKTABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.fktable_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.fkcolumn_name != null) {
+        oprot.writeFieldBegin(FKCOLUMN_NAME_FIELD_DESC);
+        oprot.writeString(struct.fkcolumn_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(KEY_SEQ_FIELD_DESC);
+      oprot.writeI32(struct.key_seq);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(UPDATE_RULE_FIELD_DESC);
+      oprot.writeI32(struct.update_rule);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(DELETE_RULE_FIELD_DESC);
+      oprot.writeI32(struct.delete_rule);
+      oprot.writeFieldEnd();
+      if (struct.fk_name != null) {
+        oprot.writeFieldBegin(FK_NAME_FIELD_DESC);
+        oprot.writeString(struct.fk_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.pk_name != null) {
+        oprot.writeFieldBegin(PK_NAME_FIELD_DESC);
+        oprot.writeString(struct.pk_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(ENABLE_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.enable_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(VALIDATE_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.validate_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(RELY_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.rely_cstr);
+      oprot.writeFieldEnd();
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class SQLForeignKeyTupleSchemeFactory implements SchemeFactory {
+    public SQLForeignKeyTupleScheme getScheme() {
+      return new SQLForeignKeyTupleScheme();
+    }
+  }
+
+  private static class SQLForeignKeyTupleScheme extends TupleScheme<SQLForeignKey> {
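+    // The tuple scheme is a denser encoding used with TTupleProtocol: a leading 15-bit
+    // BitSet records which fields are present, then only those fields are written, in
+    // field-id order, with no per-field headers.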
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetPktable_db()) {
+        optionals.set(0);
+      }
+      if (struct.isSetPktable_name()) {
+        optionals.set(1);
+      }
+      if (struct.isSetPkcolumn_name()) {
+        optionals.set(2);
+      }
+      if (struct.isSetFktable_db()) {
+        optionals.set(3);
+      }
+      if (struct.isSetFktable_name()) {
+        optionals.set(4);
+      }
+      if (struct.isSetFkcolumn_name()) {
+        optionals.set(5);
+      }
+      if (struct.isSetKey_seq()) {
+        optionals.set(6);
+      }
+      if (struct.isSetUpdate_rule()) {
+        optionals.set(7);
+      }
+      if (struct.isSetDelete_rule()) {
+        optionals.set(8);
+      }
+      if (struct.isSetFk_name()) {
+        optionals.set(9);
+      }
+      if (struct.isSetPk_name()) {
+        optionals.set(10);
+      }
+      if (struct.isSetEnable_cstr()) {
+        optionals.set(11);
+      }
+      if (struct.isSetValidate_cstr()) {
+        optionals.set(12);
+      }
+      if (struct.isSetRely_cstr()) {
+        optionals.set(13);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(14);
+      }
+      oprot.writeBitSet(optionals, 15);
+      if (struct.isSetPktable_db()) {
+        oprot.writeString(struct.pktable_db);
+      }
+      if (struct.isSetPktable_name()) {
+        oprot.writeString(struct.pktable_name);
+      }
+      if (struct.isSetPkcolumn_name()) {
+        oprot.writeString(struct.pkcolumn_name);
+      }
+      if (struct.isSetFktable_db()) {
+        oprot.writeString(struct.fktable_db);
+      }
+      if (struct.isSetFktable_name()) {
+        oprot.writeString(struct.fktable_name);
+      }
+      if (struct.isSetFkcolumn_name()) {
+        oprot.writeString(struct.fkcolumn_name);
+      }
+      if (struct.isSetKey_seq()) {
+        oprot.writeI32(struct.key_seq);
+      }
+      if (struct.isSetUpdate_rule()) {
+        oprot.writeI32(struct.update_rule);
+      }
+      if (struct.isSetDelete_rule()) {
+        oprot.writeI32(struct.delete_rule);
+      }
+      if (struct.isSetFk_name()) {
+        oprot.writeString(struct.fk_name);
+      }
+      if (struct.isSetPk_name()) {
+        oprot.writeString(struct.pk_name);
+      }
+      if (struct.isSetEnable_cstr()) {
+        oprot.writeBool(struct.enable_cstr);
+      }
+      if (struct.isSetValidate_cstr()) {
+        oprot.writeBool(struct.validate_cstr);
+      }
+      if (struct.isSetRely_cstr()) {
+        oprot.writeBool(struct.rely_cstr);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(15);
+      if (incoming.get(0)) {
+        struct.pktable_db = iprot.readString();
+        struct.setPktable_dbIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.pktable_name = iprot.readString();
+        struct.setPktable_nameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.pkcolumn_name = iprot.readString();
+        struct.setPkcolumn_nameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.fktable_db = iprot.readString();
+        struct.setFktable_dbIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.fktable_name = iprot.readString();
+        struct.setFktable_nameIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.fkcolumn_name = iprot.readString();
+        struct.setFkcolumn_nameIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.key_seq = iprot.readI32();
+        struct.setKey_seqIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.update_rule = iprot.readI32();
+        struct.setUpdate_ruleIsSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.delete_rule = iprot.readI32();
+        struct.setDelete_ruleIsSet(true);
+      }
+      if (incoming.get(9)) {
+        struct.fk_name = iprot.readString();
+        struct.setFk_nameIsSet(true);
+      }
+      if (incoming.get(10)) {
+        struct.pk_name = iprot.readString();
+        struct.setPk_nameIsSet(true);
+      }
+      if (incoming.get(11)) {
+        struct.enable_cstr = iprot.readBool();
+        struct.setEnable_cstrIsSet(true);
+      }
+      if (incoming.get(12)) {
+        struct.validate_cstr = iprot.readBool();
+        struct.setValidate_cstrIsSet(true);
+      }
+      if (incoming.get(13)) {
+        struct.rely_cstr = iprot.readBool();
+        struct.setRely_cstrIsSet(true);
+      }
+      if (incoming.get(14)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
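
The TupleScheme above is Thrift's compact encoding for structs whose fields are
all optional: the writer first emits a bit set recording which of the fifteen
SQLForeignKey fields are present (writeBitSet(optionals, 15)), then writes only
those fields, in field-id order; the reader mirrors that exactly, so both sides
must agree on the bit positions. A minimal stand-alone sketch of the same
pattern over plain java.io streams (the class and field names here are
illustrative, not part of the generated API):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Illustrative sketch of the tuple-scheme pattern above: a presence bitmap
// followed by only the fields that are actually set. Plain java.io is used
// here instead of the real TTupleProtocol.
public class TupleSchemeSketch {

  static class Key {
    String pktableDb;  // bit 0
    Integer keySeq;    // bit 1
    Boolean relyCstr;  // bit 2
  }

  static byte[] write(Key k) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    int bits = (k.pktableDb != null ? 1      : 0)
             | (k.keySeq    != null ? 1 << 1 : 0)
             | (k.relyCstr  != null ? 1 << 2 : 0);
    out.writeByte(bits);                          // the bit set of optionals
    if (k.pktableDb != null) out.writeUTF(k.pktableDb);
    if (k.keySeq    != null) out.writeInt(k.keySeq);
    if (k.relyCstr  != null) out.writeBoolean(k.relyCstr);
    return buf.toByteArray();
  }

  static Key read(byte[] data) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
    int bits = in.readUnsignedByte();
    Key k = new Key();
    if ((bits & 1)      != 0) k.pktableDb = in.readUTF();  // same order as write
    if ((bits & 1 << 1) != 0) k.keySeq    = in.readInt();
    if ((bits & 1 << 2) != 0) k.relyCstr  = in.readBoolean();
    return k;
  }
}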


[71/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index 0000000,29c98d1..3a65f77
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@@ -1,0 -1,104 +1,109 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.List;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
+ import org.apache.hadoop.hive.metastore.txn.TxnStore;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ 
+ /**
+  * An interface wrapper for HMSHandler.  This interface contains methods that need to be
+  * called by internal classes but that are not part of the thrift interface.
+  */
+ @InterfaceAudience.Private
+ public interface IHMSHandler extends ThriftHiveMetastore.Iface, Configurable {
+ 
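+   /**
+    * Initialize the handler.
+    * @throws MetaException if initialization fails.
+    */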
+   void init() throws MetaException;
+ 
+   /**
+    * Get the id of the thread of this handler.
+    * @return thread id
+    */
+   int getThreadId();
+ 
+   /**
+    * Get a reference to the underlying RawStore.
+    * @return the RawStore instance.
+    * @throws MetaException if the creation of a new RawStore object is necessary but fails.
+    */
+   RawStore getMS() throws MetaException;
+ 
+   /**
+    * Get a reference to the underlying TxnStore.
+    * @return the TxnStore instance.
+    */
+   TxnStore getTxnHandler();
+ 
+   /**
+    * Get a reference to Hive's warehouse object (the class that does all the physical operations).
+    * @return Warehouse instance.
+    */
+   Warehouse getWh();
+ 
+   /**
+    * Equivalent to get_database, but does not write to audit logs or fire pre-event listeners.
+    * Meant to be used by internal Hive classes that do not use the thrift interface.
+    * @param catName catalog name
+    * @param name database name
+    * @return database object
+    * @throws NoSuchObjectException If the database does not exist.
+    * @throws MetaException If another error occurs.
+    */
+   Database get_database_core(final String catName, final String name)
+       throws NoSuchObjectException, MetaException;
+ 
+   /**
+    * Equivalent to get_table, but does not write audit logs or fire pre-event listeners.
+    * Meant to be used for calls made by other Hive classes that do not use the
+    * thrift interface.
+    * @param catName catalog name
+    * @param dbname database name
+    * @param name table name
+    * @return Table object
+    * @throws NoSuchObjectException If the table does not exist.
+    * @throws MetaException If another error occurs.
+    */
+   Table get_table_core(final String catName, final String dbname, final String name)
+       throws MetaException, NoSuchObjectException;
+ 
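++  /**
++   * Same as {@link #get_table_core(String, String, String)}, but additionally
++   * takes transactional snapshot information (a transaction id and a valid
++   * write-id list string) so the returned table can be checked against that
++   * snapshot.
++   * @param catName catalog name
++   * @param dbname database name
++   * @param name table name
++   * @param txnId transaction id of the calling transaction
++   * @param writeIdList valid write id list for the table, serialized as a string
++   * @return Table object
++   * @throws NoSuchObjectException If the table does not exist.
++   * @throws MetaException If another error occurs.
++   */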
++  Table get_table_core(final String catName, final String dbname,
++                       final String name, final long txnId,
++                       final String writeIdList)
++      throws MetaException, NoSuchObjectException;
++
+   /**
+    * Get a list of all transactional listeners.
+    * @return list of listeners.
+    */
+   List<TransactionalMetaStoreEventListener> getTransactionalListeners();
+ 
+   /**
+    * Get a list of all non-transactional listeners.
+    * @return list of non-transactional listeners.
+    */
+   List<MetaStoreEventListener> getListeners();
+ }
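
Because get_database_core and get_table_core skip audit logging and pre-event
listeners, they are the entry points for internal metastore code whose lookups
should not appear as user traffic. A hypothetical caller (only IHMSHandler and
the api types below come from the interface above; the helper class itself is
illustrative):

import org.apache.hadoop.hive.metastore.IHMSHandler;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
import org.apache.hadoop.hive.metastore.api.Table;

// Hypothetical internal helper, not part of this patch: resolves a table via
// the core call, so no audit entry is written and no pre-event listener fires.
public class InternalTableLookup {

  private final IHMSHandler handler;

  public InternalTableLookup(IHMSHandler handler) {
    this.handler = handler;
  }

  /** Returns the table, or null if it does not exist. */
  public Table findTable(String catName, String dbName, String tblName)
      throws MetaException {
    try {
      return handler.get_table_core(catName, dbName, tblName);
    } catch (NoSuchObjectException e) {
      return null;  // absence is an expected outcome for internal callers
    }
  }
}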


[82/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
index 0000000,ec129ef..183f977
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java
@@@ -1,0 -1,239288 +1,240381 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ThriftHiveMetastore {
+ 
+   /**
+    * This interface is live.
+    */
+   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public interface Iface extends com.facebook.fb303.FacebookService.Iface {
+ 
+     public String getMetaConf(String key) throws MetaException, org.apache.thrift.TException;
+ 
+     public void setMetaConf(String key, String value) throws MetaException, org.apache.thrift.TException;
+ 
+     public void create_catalog(CreateCatalogRequest catalog) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void alter_catalog(AlterCatalogRequest rqst) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public GetCatalogResponse get_catalog(GetCatalogRequest catName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public GetCatalogsResponse get_catalogs() throws MetaException, org.apache.thrift.TException;
+ 
+     public void drop_catalog(DropCatalogRequest catName) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public void create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public Database get_database(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void drop_database(String name, boolean deleteData, boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_databases(String pattern) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_all_databases() throws MetaException, org.apache.thrift.TException;
+ 
+     public void alter_database(String dbname, Database db) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public Type get_type(String name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public boolean create_type(Type type) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public boolean drop_type(String type) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public Map<String,Type> get_type_all(String name) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<FieldSchema> get_fields(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;
+ 
+     public List<FieldSchema> get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;
+ 
+     public List<FieldSchema> get_schema(String db_name, String table_name) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;
+ 
+     public List<FieldSchema> get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context) throws MetaException, UnknownTableException, UnknownDBException, org.apache.thrift.TException;
+ 
+     public void create_table(Table tbl) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public void create_table_with_constraints(Table tbl, List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public void drop_constraint(DropConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void add_primary_key(AddPrimaryKeyRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void add_foreign_key(AddForeignKeyRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void add_unique_constraint(AddUniqueConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void add_not_null_constraint(AddNotNullConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void add_default_constraint(AddDefaultConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void add_check_constraint(AddCheckConstraintRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void drop_table(String dbname, String name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void truncate_table(String dbName, String tableName, List<String> partNames) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_tables(String db_name, String pattern) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_tables_by_type(String db_name, String pattern, String tableType) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_materialized_views_for_rewriting(String db_name) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<TableMeta> get_table_meta(String db_patterns, String tbl_patterns, List<String> tbl_types) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_all_tables(String db_name) throws MetaException, org.apache.thrift.TException;
+ 
+     public Table get_table(String dbname, String tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public List<Table> get_table_objects_by_name(String dbname, List<String> tbl_names) throws org.apache.thrift.TException;
+ 
+     public GetTableResult get_table_req(GetTableRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public GetTablesResult get_table_objects_by_name_req(GetTablesRequest req) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
+ 
+     public Map<String,Materialization> get_materialization_invalidation_info(String dbname, List<String> tbl_names) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
+ 
+     public void update_creation_metadata(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
+ 
+     public List<String> get_table_names_by_filter(String dbname, String filter, short max_tables) throws MetaException, InvalidOperationException, UnknownDBException, org.apache.thrift.TException;
+ 
+     public void alter_table(String dbname, String tbl_name, Table new_tbl) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public Partition add_partition(Partition new_part) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public int add_partitions(List<Partition> new_parts) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public int add_partitions_pspec(List<PartitionSpec> new_parts) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public Partition append_partition(String db_name, String tbl_name, List<String> part_vals) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public AddPartitionsResult add_partitions_req(AddPartitionsRequest request) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public Partition append_partition_with_environment_context(String db_name, String tbl_name, List<String> part_vals, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public Partition append_partition_by_name(String db_name, String tbl_name, String part_name) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public Partition append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context) throws InvalidObjectException, AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public boolean drop_partition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public boolean drop_partition_with_environment_context(String db_name, String tbl_name, List<String> part_vals, boolean deleteData, EnvironmentContext environment_context) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public boolean drop_partition_by_name(String db_name, String tbl_name, String part_name, boolean deleteData) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public boolean drop_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, boolean deleteData, EnvironmentContext environment_context) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public DropPartitionsResult drop_partitions_req(DropPartitionsRequest req) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public Partition get_partition(String db_name, String tbl_name, List<String> part_vals) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public Partition exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;
+ 
+     public List<Partition> exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name) throws MetaException, NoSuchObjectException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;
+ 
+     public Partition get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public Partition get_partition_by_name(String db_name, String tbl_name, String part_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public List<Partition> get_partitions(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public List<Partition> get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public List<PartitionSpec> get_partitions_pspec(String db_name, String tbl_name, int max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_partition_names(String db_name, String tbl_name, short max_parts) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public PartitionValuesResponse get_partition_values(PartitionValuesRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public List<Partition> get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public List<Partition> get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public List<Partition> get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public List<PartitionSpec> get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public int get_num_partitions_by_filter(String db_name, String tbl_name, String filter) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public List<Partition> get_partitions_by_names(String db_name, String tbl_name, List<String> names) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public void alter_partition(String db_name, String tbl_name, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
++    public AlterPartitionsResponse alter_partitions_with_environment_context_req(AlterPartitionsRequest req) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
++
+     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public void rename_partition(String db_name, String tbl_name, List<String> part_vals, Partition new_part) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public boolean partition_name_has_valid_characters(List<String> part_vals, boolean throw_exception) throws MetaException, org.apache.thrift.TException;
+ 
+     public String get_config_value(String name, String defaultValue) throws ConfigValSecurityException, org.apache.thrift.TException;
+ 
+     public List<String> partition_name_to_vals(String part_name) throws MetaException, org.apache.thrift.TException;
+ 
+     public Map<String,String> partition_name_to_spec(String part_name) throws MetaException, org.apache.thrift.TException;
+ 
+     public void markPartitionForEvent(String db_name, String tbl_name, Map<String,String> part_vals, PartitionEventType eventType) throws MetaException, NoSuchObjectException, UnknownDBException, UnknownTableException, UnknownPartitionException, InvalidPartitionException, org.apache.thrift.TException;
+ 
+     public boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map<String,String> part_vals, PartitionEventType eventType) throws MetaException, NoSuchObjectException, UnknownDBException, UnknownTableException, UnknownPartitionException, InvalidPartitionException, org.apache.thrift.TException;
+ 
+     public PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public CheckConstraintsResponse get_check_constraints(CheckConstraintsRequest request) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public boolean update_table_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException;
+ 
+     public boolean update_partition_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException;
+ 
+     public ColumnStatistics get_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException;
+ 
+     public ColumnStatistics get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidInputException, InvalidObjectException, org.apache.thrift.TException;
+ 
+     public TableStatsResult get_table_statistics_req(TableStatsRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException;
+ 
+     public boolean delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;
+ 
+     public boolean delete_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException;
+ 
+     public void create_function(Function func) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public void drop_function(String dbName, String funcName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void alter_function(String dbName, String funcName, Function newFunc) throws InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_functions(String dbName, String pattern) throws MetaException, org.apache.thrift.TException;
+ 
+     public Function get_function(String dbName, String funcName) throws MetaException, NoSuchObjectException, org.apache.thrift.TException;
+ 
+     public GetAllFunctionsResponse get_all_functions() throws MetaException, org.apache.thrift.TException;
+ 
+     public boolean create_role(Role role) throws MetaException, org.apache.thrift.TException;
+ 
+     public boolean drop_role(String role_name) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<String> get_role_names() throws MetaException, org.apache.thrift.TException;
+ 
+     public boolean grant_role(String role_name, String principal_name, PrincipalType principal_type, String grantor, PrincipalType grantorType, boolean grant_option) throws MetaException, org.apache.thrift.TException;
+ 
+     public boolean revoke_role(String role_name, String principal_name, PrincipalType principal_type) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<Role> list_roles(String principal_name, PrincipalType principal_type) throws MetaException, org.apache.thrift.TException;
+ 
+     public GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request) throws MetaException, org.apache.thrift.TException;
+ 
+     public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request) throws MetaException, org.apache.thrift.TException;
+ 
+     public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request) throws MetaException, org.apache.thrift.TException;
+ 
+     public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, String user_name, List<String> group_names) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<HiveObjectPrivilege> list_privileges(String principal_name, PrincipalType principal_type, HiveObjectRef hiveObject) throws MetaException, org.apache.thrift.TException;
+ 
+     public boolean grant_privileges(PrivilegeBag privileges) throws MetaException, org.apache.thrift.TException;
+ 
+     public boolean revoke_privileges(PrivilegeBag privileges) throws MetaException, org.apache.thrift.TException;
+ 
+     public GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilegeRequest request) throws MetaException, org.apache.thrift.TException;
+ 
+     public GrantRevokePrivilegeResponse refresh_privileges(HiveObjectRef objToRefresh, String authorizer, GrantRevokePrivilegeRequest grantRequest) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<String> set_ugi(String user_name, List<String> group_names) throws MetaException, org.apache.thrift.TException;
+ 
+     public String get_delegation_token(String token_owner, String renewer_kerberos_principal_name) throws MetaException, org.apache.thrift.TException;
+ 
+     public long renew_delegation_token(String token_str_form) throws MetaException, org.apache.thrift.TException;
+ 
+     public void cancel_delegation_token(String token_str_form) throws MetaException, org.apache.thrift.TException;
+ 
+     public boolean add_token(String token_identifier, String delegation_token) throws org.apache.thrift.TException;
+ 
+     public boolean remove_token(String token_identifier) throws org.apache.thrift.TException;
+ 
+     public String get_token(String token_identifier) throws org.apache.thrift.TException;
+ 
+     public List<String> get_all_token_identifiers() throws org.apache.thrift.TException;
+ 
+     public int add_master_key(String key) throws MetaException, org.apache.thrift.TException;
+ 
+     public void update_master_key(int seq_number, String key) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public boolean remove_master_key(int key_seq) throws org.apache.thrift.TException;
+ 
+     public List<String> get_master_keys() throws org.apache.thrift.TException;
+ 
+     public GetOpenTxnsResponse get_open_txns() throws org.apache.thrift.TException;
+ 
+     public GetOpenTxnsInfoResponse get_open_txns_info() throws org.apache.thrift.TException;
+ 
+     public OpenTxnsResponse open_txns(OpenTxnRequest rqst) throws org.apache.thrift.TException;
+ 
+     public void abort_txn(AbortTxnRequest rqst) throws NoSuchTxnException, org.apache.thrift.TException;
+ 
+     public void abort_txns(AbortTxnsRequest rqst) throws NoSuchTxnException, org.apache.thrift.TException;
+ 
+     public void commit_txn(CommitTxnRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException;
+ 
+     public void repl_tbl_writeid_state(ReplTblWriteIdStateRequest rqst) throws org.apache.thrift.TException;
+ 
+     public GetValidWriteIdsResponse get_valid_write_ids(GetValidWriteIdsRequest rqst) throws NoSuchTxnException, MetaException, org.apache.thrift.TException;
+ 
+     public AllocateTableWriteIdsResponse allocate_table_write_ids(AllocateTableWriteIdsRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException, org.apache.thrift.TException;
+ 
+     public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException;
+ 
+     public LockResponse check_lock(CheckLockRequest rqst) throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, org.apache.thrift.TException;
+ 
+     public void unlock(UnlockRequest rqst) throws NoSuchLockException, TxnOpenException, org.apache.thrift.TException;
+ 
+     public ShowLocksResponse show_locks(ShowLocksRequest rqst) throws org.apache.thrift.TException;
+ 
+     public void heartbeat(HeartbeatRequest ids) throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException;
+ 
+     public HeartbeatTxnRangeResponse heartbeat_txn_range(HeartbeatTxnRangeRequest txns) throws org.apache.thrift.TException;
+ 
+     public void compact(CompactionRequest rqst) throws org.apache.thrift.TException;
+ 
+     public CompactionResponse compact2(CompactionRequest rqst) throws org.apache.thrift.TException;
+ 
+     public ShowCompactResponse show_compact(ShowCompactRequest rqst) throws org.apache.thrift.TException;
+ 
+     public void add_dynamic_partitions(AddDynamicPartitions rqst) throws NoSuchTxnException, TxnAbortedException, org.apache.thrift.TException;
+ 
+     public NotificationEventResponse get_next_notification(NotificationEventRequest rqst) throws org.apache.thrift.TException;
+ 
+     public CurrentNotificationEventId get_current_notificationEventId() throws org.apache.thrift.TException;
+ 
+     public NotificationEventsCountResponse get_notification_events_count(NotificationEventsCountRequest rqst) throws org.apache.thrift.TException;
+ 
+     public FireEventResponse fire_listener_event(FireEventRequest rqst) throws org.apache.thrift.TException;
+ 
+     public void flushCache() throws org.apache.thrift.TException;
+ 
+     public WriteNotificationLogResponse add_write_notification_log(WriteNotificationLogRequest rqst) throws org.apache.thrift.TException;
+ 
+     public CmRecycleResponse cm_recycle(CmRecycleRequest request) throws MetaException, org.apache.thrift.TException;
+ 
+     public GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req) throws org.apache.thrift.TException;
+ 
+     public GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req) throws org.apache.thrift.TException;
+ 
+     public PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req) throws org.apache.thrift.TException;
+ 
+     public ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req) throws org.apache.thrift.TException;
+ 
+     public CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req) throws org.apache.thrift.TException;
+ 
+     public String get_metastore_db_uuid() throws MetaException, org.apache.thrift.TException;
+ 
+     public WMCreateResourcePlanResponse create_resource_plan(WMCreateResourcePlanRequest request) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public WMGetResourcePlanResponse get_resource_plan(WMGetResourcePlanRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public WMGetActiveResourcePlanResponse get_active_resource_plan(WMGetActiveResourcePlanRequest request) throws MetaException, org.apache.thrift.TException;
+ 
+     public WMGetAllResourcePlanResponse get_all_resource_plans(WMGetAllResourcePlanRequest request) throws MetaException, org.apache.thrift.TException;
+ 
+     public WMAlterResourcePlanResponse alter_resource_plan(WMAlterResourcePlanRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public WMValidateResourcePlanResponse validate_resource_plan(WMValidateResourcePlanRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public WMDropResourcePlanResponse drop_resource_plan(WMDropResourcePlanRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public WMCreateTriggerResponse create_wm_trigger(WMCreateTriggerRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public WMAlterTriggerResponse alter_wm_trigger(WMAlterTriggerRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public WMDropTriggerResponse drop_wm_trigger(WMDropTriggerRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public WMCreatePoolResponse create_wm_pool(WMCreatePoolRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public WMAlterPoolResponse alter_wm_pool(WMAlterPoolRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request) throws AlreadyExistsException, NoSuchObjectException, InvalidObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void create_ischema(ISchema schema) throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void alter_ischema(AlterISchemaRequest rqst) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public ISchema get_ischema(ISchemaName name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void drop_ischema(ISchemaName name) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public void add_schema_version(SchemaVersion schemaVersion) throws AlreadyExistsException, NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public SchemaVersion get_schema_version(SchemaVersionDescriptor schemaVersion) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public SchemaVersion get_schema_latest_version(ISchemaName schemaName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public List<SchemaVersion> get_schema_all_versions(ISchemaName schemaName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void drop_schema_version(SchemaVersionDescriptor schemaVersion) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public FindSchemasByColsResp get_schemas_by_cols(FindSchemasByColsRqst rqst) throws MetaException, org.apache.thrift.TException;
+ 
+     public void map_schema_version_to_serde(MapSchemaVersionToSerdeRequest rqst) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public void set_schema_version_state(SetSchemaVersionStateRequest rqst) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException;
+ 
+     public void add_serde(SerDeInfo serde) throws AlreadyExistsException, MetaException, org.apache.thrift.TException;
+ 
+     public SerDeInfo get_serde(GetSerdeRequest rqst) throws NoSuchObjectException, MetaException, org.apache.thrift.TException;
+ 
+     public LockResponse get_lock_materialization_rebuild(String dbName, String tableName, long txnId) throws org.apache.thrift.TException;
+ 
+     public boolean heartbeat_lock_materialization_rebuild(String dbName, String tableName, long txnId) throws org.apache.thrift.TException;
+ 
+     public void add_runtime_stats(RuntimeStat stat) throws MetaException, org.apache.thrift.TException;
+ 
+     public List<RuntimeStat> get_runtime_stats(GetRuntimeStatsRequest rqst) throws MetaException, org.apache.thrift.TException;
+ 
+   }
+ 
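+   // Every synchronous method in Iface above has a matching AsyncIface variant
+   // that takes an org.apache.thrift.async.AsyncMethodCallback instead of
+   // returning a value; the callback receives the result or the exception.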
+   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public interface AsyncIface extends com.facebook.fb303.FacebookService.AsyncIface {
+ 
+     public void getMetaConf(String key, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void setMetaConf(String key, String value, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_catalog(CreateCatalogRequest catalog, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_catalog(AlterCatalogRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_catalog(GetCatalogRequest catName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_catalogs(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_catalog(DropCatalogRequest catName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_database(Database database, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_database(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_database(String name, boolean deleteData, boolean cascade, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_databases(String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_all_databases(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_database(String dbname, Database db, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_type(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_type(Type type, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_type(String type, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_type_all(String name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_fields(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_fields_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_schema(String db_name, String table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_schema_with_environment_context(String db_name, String table_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_table(Table tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_table_with_constraints(Table tbl, List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys, List<SQLUniqueConstraint> uniqueConstraints, List<SQLNotNullConstraint> notNullConstraints, List<SQLDefaultConstraint> defaultConstraints, List<SQLCheckConstraint> checkConstraints, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_constraint(DropConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_primary_key(AddPrimaryKeyRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_foreign_key(AddForeignKeyRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_unique_constraint(AddUniqueConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_not_null_constraint(AddNotNullConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_default_constraint(AddDefaultConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_check_constraint(AddCheckConstraintRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_table(String dbname, String name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_table_with_environment_context(String dbname, String name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void truncate_table(String dbName, String tableName, List<String> partNames, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_tables(String db_name, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_tables_by_type(String db_name, String pattern, String tableType, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_materialized_views_for_rewriting(String db_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_table_meta(String db_patterns, String tbl_patterns, List<String> tbl_types, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_all_tables(String db_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_table(String dbname, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_table_objects_by_name(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_table_req(GetTableRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_table_objects_by_name_req(GetTablesRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_materialization_invalidation_info(String dbname, List<String> tbl_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void update_creation_metadata(String catName, String dbname, String tbl_name, CreationMetadata creation_metadata, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_table_names_by_filter(String dbname, String filter, short max_tables, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_table(String dbname, String tbl_name, Table new_tbl, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_table_with_environment_context(String dbname, String tbl_name, Table new_tbl, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_table_with_cascade(String dbname, String tbl_name, Table new_tbl, boolean cascade, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_partition(Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_partitions(List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_partitions_pspec(List<PartitionSpec> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void append_partition(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_partitions_req(AddPartitionsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void append_partition_with_environment_context(String db_name, String tbl_name, List<String> part_vals, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void append_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void append_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_partition(String db_name, String tbl_name, List<String> part_vals, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_partition_with_environment_context(String db_name, String tbl_name, List<String> part_vals, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_partition_by_name(String db_name, String tbl_name, String part_name, boolean deleteData, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_partition_by_name_with_environment_context(String db_name, String tbl_name, String part_name, boolean deleteData, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_partitions_req(DropPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partition(String db_name, String tbl_name, List<String> part_vals, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void exchange_partition(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void exchange_partitions(Map<String,String> partitionSpecs, String source_db, String source_table_name, String dest_db, String dest_table_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partition_with_auth(String db_name, String tbl_name, List<String> part_vals, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partition_by_name(String db_name, String tbl_name, String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partitions(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partitions_with_auth(String db_name, String tbl_name, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partitions_pspec(String db_name, String tbl_name, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partition_names(String db_name, String tbl_name, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partition_values(PartitionValuesRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partitions_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partitions_ps_with_auth(String db_name, String tbl_name, List<String> part_vals, short max_parts, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partition_names_ps(String db_name, String tbl_name, List<String> part_vals, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partitions_by_filter(String db_name, String tbl_name, String filter, short max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_part_specs_by_filter(String db_name, String tbl_name, String filter, int max_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partitions_by_expr(PartitionsByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_num_partitions_by_filter(String db_name, String tbl_name, String filter, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partitions_by_names(String db_name, String tbl_name, List<String> names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_partition(String db_name, String tbl_name, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_partitions(String db_name, String tbl_name, List<Partition> new_parts, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_partitions_with_environment_context(String db_name, String tbl_name, List<Partition> new_parts, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
++    public void alter_partitions_with_environment_context_req(AlterPartitionsRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
++
+     public void alter_partition_with_environment_context(String db_name, String tbl_name, Partition new_part, EnvironmentContext environment_context, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void rename_partition(String db_name, String tbl_name, List<String> part_vals, Partition new_part, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void partition_name_has_valid_characters(List<String> part_vals, boolean throw_exception, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_config_value(String name, String defaultValue, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void partition_name_to_vals(String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void partition_name_to_spec(String part_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void markPartitionForEvent(String db_name, String tbl_name, Map<String,String> part_vals, PartitionEventType eventType, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void isPartitionMarkedForEvent(String db_name, String tbl_name, Map<String,String> part_vals, PartitionEventType eventType, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_primary_keys(PrimaryKeysRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_foreign_keys(ForeignKeysRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_unique_constraints(UniqueConstraintsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_not_null_constraints(NotNullConstraintsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_default_constraints(DefaultConstraintsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_check_constraints(CheckConstraintsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void update_table_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void update_partition_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_table_statistics_req(TableStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_partitions_statistics_req(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_aggr_stats_for(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void set_aggr_stats_for(SetPartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void delete_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_function(Function func, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_function(String dbName, String funcName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_function(String dbName, String funcName, Function newFunc, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_functions(String dbName, String pattern, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_function(String dbName, String funcName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_all_functions(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_role(Role role, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_role(String role_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_role_names(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void grant_role(String role_name, String principal_name, PrincipalType principal_type, String grantor, PrincipalType grantorType, boolean grant_option, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void revoke_role(String role_name, String principal_name, PrincipalType principal_type, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void list_roles(String principal_name, PrincipalType principal_type, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void grant_revoke_role(GrantRevokeRoleRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_principals_in_role(GetPrincipalsInRoleRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_privilege_set(HiveObjectRef hiveObject, String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void list_privileges(String principal_name, PrincipalType principal_type, HiveObjectRef hiveObject, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void grant_privileges(PrivilegeBag privileges, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void revoke_privileges(PrivilegeBag privileges, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void grant_revoke_privileges(GrantRevokePrivilegeRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void refresh_privileges(HiveObjectRef objToRefresh, String authorizer, GrantRevokePrivilegeRequest grantRequest, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void set_ugi(String user_name, List<String> group_names, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_delegation_token(String token_owner, String renewer_kerberos_principal_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void renew_delegation_token(String token_str_form, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void cancel_delegation_token(String token_str_form, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_token(String token_identifier, String delegation_token, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void remove_token(String token_identifier, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_token(String token_identifier, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_all_token_identifiers(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_master_key(String key, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void update_master_key(int seq_number, String key, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void remove_master_key(int key_seq, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_master_keys(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_open_txns(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_open_txns_info(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void open_txns(OpenTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void abort_txn(AbortTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void abort_txns(AbortTxnsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void commit_txn(CommitTxnRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void repl_tbl_writeid_state(ReplTblWriteIdStateRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_valid_write_ids(GetValidWriteIdsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void allocate_table_write_ids(AllocateTableWriteIdsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void lock(LockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void check_lock(CheckLockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void unlock(UnlockRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void show_locks(ShowLocksRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void heartbeat(HeartbeatRequest ids, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void heartbeat_txn_range(HeartbeatTxnRangeRequest txns, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void compact(CompactionRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void compact2(CompactionRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void show_compact(ShowCompactRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_dynamic_partitions(AddDynamicPartitions rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_next_notification(NotificationEventRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_current_notificationEventId(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_notification_events_count(NotificationEventsCountRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void fire_listener_event(FireEventRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void flushCache(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_write_notification_log(WriteNotificationLogRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void cm_recycle(CmRecycleRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_file_metadata_by_expr(GetFileMetadataByExprRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_file_metadata(GetFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void put_file_metadata(PutFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void clear_file_metadata(ClearFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void cache_file_metadata(CacheFileMetadataRequest req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_metastore_db_uuid(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_resource_plan(WMCreateResourcePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_resource_plan(WMGetResourcePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_active_resource_plan(WMGetActiveResourcePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_all_resource_plans(WMGetAllResourcePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_resource_plan(WMAlterResourcePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void validate_resource_plan(WMValidateResourcePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_resource_plan(WMDropResourcePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_wm_trigger(WMCreateTriggerRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_wm_trigger(WMAlterTriggerRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_wm_trigger(WMDropTriggerRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_wm_pool(WMCreatePoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_wm_pool(WMAlterPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_wm_pool(WMDropPoolRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_wm_mapping(WMDropMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void create_ischema(ISchema schema, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void alter_ischema(AlterISchemaRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_ischema(ISchemaName name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_ischema(ISchemaName name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_schema_version(SchemaVersion schemaVersion, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_schema_version(SchemaVersionDescriptor schemaVersion, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_schema_latest_version(ISchemaName schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_schema_all_versions(ISchemaName schemaName, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void drop_schema_version(SchemaVersionDescriptor schemaVersion, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_schemas_by_cols(FindSchemasByColsRqst rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void map_schema_version_to_serde(MapSchemaVersionToSerdeRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void set_schema_version_state(SetSchemaVersionStateRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_serde(SerDeInfo serde, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_serde(GetSerdeRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_lock_materialization_rebuild(String dbName, String tableName, long txnId, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void heartbeat_lock_materialization_rebuild(String dbName, String tableName, long txnId, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void add_runtime_stats(RuntimeStat stat, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+     public void get_runtime_stats(GetRuntimeStatsRequest rqst, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ 
+   }
+ 
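The AsyncIface above mirrors every blocking metastore call with a callback-taking variant. A minimal sketch of driving it, assuming libthrift 0.9.3's nonblocking client stack and a metastore listening on localhost:9083 (host, port, and the chosen method are illustrative assumptions, not part of this commit):

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.async.AsyncMethodCallback;
    import org.apache.thrift.async.TAsyncClientManager;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TNonblockingSocket;

    public class AsyncMetastoreSketch {
      public static void main(String[] args) throws Exception {
        TAsyncClientManager manager = new TAsyncClientManager();   // owns the selector thread
        TNonblockingSocket socket = new TNonblockingSocket("localhost", 9083);  // assumed endpoint
        ThriftHiveMetastore.AsyncClient client = new ThriftHiveMetastore.AsyncClient(
            new TBinaryProtocol.Factory(), manager, socket);
        // The call returns immediately; the *_call object handed to onComplete
        // carries the decoded result (or the declared exception) via getResult().
        client.get_all_databases(
            new AsyncMethodCallback<ThriftHiveMetastore.AsyncClient.get_all_databases_call>() {
              public void onComplete(ThriftHiveMetastore.AsyncClient.get_all_databases_call call) {
                try {
                  System.out.println(call.getResult());
                } catch (Exception e) {
                  onError(e);
                }
              }
              public void onError(Exception e) {
                e.printStackTrace();
              }
            });
      }
    }

Note that the callback fires on the client manager's selector thread, so anything slow in onComplete belongs on a separate executor.
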
+   @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Client extends com.facebook.fb303.FacebookService.Client implements Iface {
+     @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class Factory implements org.apache.thrift.TServiceClientFactory<Client> {
+       public Factory() {}
+       public Client getClient(org.apache.thrift.protocol.TProtocol prot) {
+         return new Client(prot);
+       }
+       public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
+         return new Client(iprot, oprot);
+       }
+     }
+ 
+     public Client(org.apache.thrift.protocol.TProtocol prot)
+     {
+       super(prot, prot);
+     }
+ 
+     public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) {
+       super(iprot, oprot);
+     }
+ 
+     public String getMetaConf(String key) throws MetaException, org.apache.thrift.TException
+     {
+       send_getMetaConf(key);
+       return recv_getMetaConf();
+     }
+ 
+     public void send_getMetaConf(String key) throws org.apache.thrift.TException
+     {
+       getMetaConf_args args = new getMetaConf_args();
+       args.setKey(key);
+       sendBase("getMetaConf", args);
+     }
+ 
+     public String recv_getMetaConf() throws MetaException, org.apache.thrift.TException
+     {
+       getMetaConf_result result = new getMetaConf_result();
+       receiveBase(result, "getMetaConf");
+       if (result.isSetSuccess()) {
+         return result.success;
+       }
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getMetaConf failed: unknown result");
+     }
+ 
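As getMetaConf shows, each blocking method on Client is a thin wrapper over a send_*/recv_* pair: sendBase serializes the generated *_args struct onto the output protocol, and receiveBase reads the matching *_result, returning the success field or rethrowing a declared exception such as MetaException. A minimal sketch of driving this over a plain blocking socket (the endpoint and config key are illustrative assumptions, not part of this commit):

    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class SyncMetastoreSketch {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TSocket("localhost", 9083);  // assumed endpoint
        transport.open();
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        // Blocks until the server's getMetaConf_result arrives; a MetaException
        // set in the result struct is rethrown to the caller here.
        System.out.println(client.getMetaConf("metastore.try.direct.sql"));  // assumed key
        transport.close();
      }
    }
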
+     public void setMetaConf(String key, String value) throws MetaException, org.apache.thrift.TException
+     {
+       send_setMetaConf(key, value);
+       recv_setMetaConf();
+     }
+ 
+     public void send_setMetaConf(String key, String value) throws org.apache.thrift.TException
+     {
+       setMetaConf_args args = new setMetaConf_args();
+       args.setKey(key);
+       args.setValue(value);
+       sendBase("setMetaConf", args);
+     }
+ 
+     public void recv_setMetaConf() throws MetaException, org.apache.thrift.TException
+     {
+       setMetaConf_result result = new setMetaConf_result();
+       receiveBase(result, "setMetaConf");
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       return;
+     }
+ 
+     public void create_catalog(CreateCatalogRequest catalog) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException
+     {
+       send_create_catalog(catalog);
+       recv_create_catalog();
+     }
+ 
+     public void send_create_catalog(CreateCatalogRequest catalog) throws org.apache.thrift.TException
+     {
+       create_catalog_args args = new create_catalog_args();
+       args.setCatalog(catalog);
+       sendBase("create_catalog", args);
+     }
+ 
+     public void recv_create_catalog() throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException
+     {
+       create_catalog_result result = new create_catalog_result();
+       receiveBase(result, "create_catalog");
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       if (result.o2 != null) {
+         throw result.o2;
+       }
+       if (result.o3 != null) {
+         throw result.o3;
+       }
+       return;
+     }
+ 
+     public void alter_catalog(AlterCatalogRequest rqst) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+     {
+       send_alter_catalog(rqst);
+       recv_alter_catalog();
+     }
+ 
+     public void send_alter_catalog(AlterCatalogRequest rqst) throws org.apache.thrift.TException
+     {
+       alter_catalog_args args = new alter_catalog_args();
+       args.setRqst(rqst);
+       sendBase("alter_catalog", args);
+     }
+ 
+     public void recv_alter_catalog() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+     {
+       alter_catalog_result result = new alter_catalog_result();
+       receiveBase(result, "alter_catalog");
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       if (result.o2 != null) {
+         throw result.o2;
+       }
+       if (result.o3 != null) {
+         throw result.o3;
+       }
+       return;
+     }
+ 
+     public GetCatalogResponse get_catalog(GetCatalogRequest catName) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+     {
+       send_get_catalog(catName);
+       return recv_get_catalog();
+     }
+ 
+     public void send_get_catalog(GetCatalogRequest catName) throws org.apache.thrift.TException
+     {
+       get_catalog_args args = new get_catalog_args();
+       args.setCatName(catName);
+       sendBase("get_catalog", args);
+     }
+ 
+     public GetCatalogResponse recv_get_catalog() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+     {
+       get_catalog_result result = new get_catalog_result();
+       receiveBase(result, "get_catalog");
+       if (result.isSetSuccess()) {
+         return result.success;
+       }
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       if (result.o2 != null) {
+         throw result.o2;
+       }
+       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_catalog failed: unknown result");
+     }
+ 
+     public GetCatalogsResponse get_catalogs() throws MetaException, org.apache.thrift.TException
+     {
+       send_get_catalogs();
+       return recv_get_catalogs();
+     }
+ 
+     public void send_get_catalogs() throws org.apache.thrift.TException
+     {
+       get_catalogs_args args = new get_catalogs_args();
+       sendBase("get_catalogs", args);
+     }
+ 
+     public GetCatalogsResponse recv_get_catalogs() throws MetaException, org.apache.thrift.TException
+     {
+       get_catalogs_result result = new get_catalogs_result();
+       receiveBase(result, "get_catalogs");
+       if (result.isSetSuccess()) {
+         return result.success;
+       }
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_catalogs failed: unknown result");
+     }
+ 
+     public void drop_catalog(DropCatalogRequest catName) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+     {
+       send_drop_catalog(catName);
+       recv_drop_catalog();
+     }
+ 
+     public void send_drop_catalog(DropCatalogRequest catName) throws org.apache.thrift.TException
+     {
+       drop_catalog_args args = new drop_catalog_args();
+       args.setCatName(catName);
+       sendBase("drop_catalog", args);
+     }
+ 
+     public void recv_drop_catalog() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+     {
+       drop_catalog_result result = new drop_catalog_result();
+       receiveBase(result, "drop_catalog");
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       if (result.o2 != null) {
+         throw result.o2;
+       }
+       if (result.o3 != null) {
+         throw result.o3;
+       }
+       return;
+     }
+ 
+     public void create_database(Database database) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException
+     {
+       send_create_database(database);
+       recv_create_database();
+     }
+ 
+     public void send_create_database(Database database) throws org.apache.thrift.TException
+     {
+       create_database_args args = new create_database_args();
+       args.setDatabase(database);
+       sendBase("create_database", args);
+     }
+ 
+     public void recv_create_database() throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException
+     {
+       create_database_result result = new create_database_result();
+       receiveBase(result, "create_database");
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       if (result.o2 != null) {
+         throw result.o2;
+       }
+       if (result.o3 != null) {
+         throw result.o3;
+       }
+       return;
+     }
+ 
+     public Database get_database(String name) throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+     {
+       send_get_database(name);
+       return recv_get_database();
+     }
+ 
+     public void send_get_database(String name) throws org.apache.thrift.TException
+     {
+       get_database_args args = new get_database_args();
+       args.setName(name);
+       sendBase("get_database", args);
+     }
+ 
+     public Database recv_get_database() throws NoSuchObjectException, MetaException, org.apache.thrift.TException
+     {
+       get_database_result result = new get_database_result();
+       receiveBase(result, "get_database");
+       if (result.isSetSuccess()) {
+         return result.success;
+       }
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       if (result.o2 != null) {
+         throw result.o2;
+       }
+       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_database failed: unknown result");
+     }
+ 
+     public void drop_database(String name, boolean deleteData, boolean cascade) throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+     {
+       send_drop_database(name, deleteData, cascade);
+       recv_drop_database();
+     }
+ 
+     public void send_drop_database(String name, boolean deleteData, boolean cascade) throws org.apache.thrift.TException
+     {
+       drop_database_args args = new drop_database_args();
+       args.setName(name);
+       args.setDeleteData(deleteData);
+       args.setCascade(cascade);
+       sendBase("drop_database", args);
+     }
+ 
+     public void recv_drop_database() throws NoSuchObjectException, InvalidOperationException, MetaException, org.apache.thrift.TException
+     {
+       drop_database_result result = new drop_database_result();
+       receiveBase(result, "drop_database");
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       if (result.o2 != null) {
+         throw result.o2;
+       }
+       if (result.o3 != null) {
+         throw result.o3;
+       }
+       return;
+     }
+ 
+     public List<String> get_databases(String pattern) throws MetaException, org.apache.thrift.TException
+     {
+       send_get_databases(pattern);
+       return recv_get_databases();
+     }
+ 
+     public void send_get_databases(String pattern) throws org.apache.thrift.TException
+     {
+       get_databases_args args = new get_databases_args();
+       args.setPattern(pattern);
+       sendBase("get_databases", args);
+     }
+ 
+     public List<String> recv_get_databases() throws MetaException, org.apache.thrift.TException
+     {
+       get_databases_result result = new get_databases_result();
+       receiveBase(result, "get_databases");
+       if (result.isSetSuccess()) {
+         return result.success;
+       }
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_databases failed: unknown result");
+     }
+ 
+     public List<String> get_all_databases() throws MetaException, org.apache.thrift.TException
+     {
+       send_get_all_databases();
+       return recv_get_all_databases();
+     }
+ 
+     public void send_get_all_databases() throws org.apache.thrift.TException
+     {
+       get_all_databases_args args = new get_all_databases_args();
+       sendBase("get_all_databases", args);
+     }
+ 
+     public List<String> recv_get_all_databases() throws MetaException, org.apache.thrift.TException
+     {
+       get_all_databases_result result = new get_all_databases_result();
+       receiveBase(result, "get_all_databases");
+       if (result.isSetSuccess()) {
+         return result.success;
+       }
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_all_databases failed: unknown result");
+     }
+ 
+     public void alter_database(String dbname, Database db) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+     {
+       send_alter_database(dbname, db);
+       recv_alter_database();
+     }
+ 
+     public void send_alter_database(String dbname, Database db) throws org.apache.thrift.TException
+     {
+       alter_database_args args = new alter_database_args();
+       args.setDbname(dbname);
+       args.setDb(db);
+       sendBase("alter_database", args);
+     }
+ 
+     public void recv_alter_database() throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+     {
+       alter_database_result result = new alter_database_result();
+       receiveBase(result, "alter_database");
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       if (result.o2 != null) {
+         throw result.o2;
+       }
+       return;
+     }
+ 
+     public Type get_type(String name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+     {
+       send_get_type(name);
+       return recv_get_type();
+     }
+ 
+     public void send_get_type(String name) throws org.apache.thrift.TException
+     {
+       get_type_args args = new get_type_args();
+       args.setName(name);
+       sendBase("get_type", args);
+     }
+ 
+     public Type recv_get_type() throws MetaException, NoSuchObjectException, org.apache.thrift.TException
+     {
+       get_type_result result = new get_type_result();
+       receiveBase(result, "get_type");
+       if (result.isSetSuccess()) {
+         return result.success;
+       }
+       if (result.o1 != null) {
+         throw result.o1;
+       }
+       if (result.o2 != null) {
+         throw result.o2;
+       }
+       throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_type failed: unknown result");
+     }
+ 
+     public boolean create_type(Type type) throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TException
+     {
+       send_create_type(type);
+       return recv_create_type();
+     }
+ 
+     public void send_create_type(Type type) throws org.apache.thrift.TException
+     {
+       create_type_args args = new create_type_args();
+       args.setType(type);
+       sendBase("create_type", args);
+     }
+ 
+     public boolean recv_create_type() throws AlreadyExistsException, InvalidObjectException, MetaException, org.apache.thrift.TExcep

<TRUNCATED>

[16/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MapSchemaVersionToSerdeRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MapSchemaVersionToSerdeRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MapSchemaVersionToSerdeRequest.java
new file mode 100644
index 0000000..81eb732
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MapSchemaVersionToSerdeRequest.java
@@ -0,0 +1,504 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class MapSchemaVersionToSerdeRequest implements org.apache.thrift.TBase<MapSchemaVersionToSerdeRequest, MapSchemaVersionToSerdeRequest._Fields>, java.io.Serializable, Cloneable, Comparable<MapSchemaVersionToSerdeRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MapSchemaVersionToSerdeRequest");
+
+  private static final org.apache.thrift.protocol.TField SCHEMA_VERSION_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaVersion", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField SERDE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("serdeName", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new MapSchemaVersionToSerdeRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new MapSchemaVersionToSerdeRequestTupleSchemeFactory());
+  }
+
+  private SchemaVersionDescriptor schemaVersion; // required
+  private String serdeName; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SCHEMA_VERSION((short)1, "schemaVersion"),
+    SERDE_NAME((short)2, "serdeName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SCHEMA_VERSION
+          return SCHEMA_VERSION;
+        case 2: // SERDE_NAME
+          return SERDE_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SCHEMA_VERSION, new org.apache.thrift.meta_data.FieldMetaData("schemaVersion", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SchemaVersionDescriptor.class)));
+    tmpMap.put(_Fields.SERDE_NAME, new org.apache.thrift.meta_data.FieldMetaData("serdeName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MapSchemaVersionToSerdeRequest.class, metaDataMap);
+  }
+
+  public MapSchemaVersionToSerdeRequest() {
+  }
+
+  public MapSchemaVersionToSerdeRequest(
+    SchemaVersionDescriptor schemaVersion,
+    String serdeName)
+  {
+    this();
+    this.schemaVersion = schemaVersion;
+    this.serdeName = serdeName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public MapSchemaVersionToSerdeRequest(MapSchemaVersionToSerdeRequest other) {
+    if (other.isSetSchemaVersion()) {
+      this.schemaVersion = new SchemaVersionDescriptor(other.schemaVersion);
+    }
+    if (other.isSetSerdeName()) {
+      this.serdeName = other.serdeName;
+    }
+  }
+
+  public MapSchemaVersionToSerdeRequest deepCopy() {
+    return new MapSchemaVersionToSerdeRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.schemaVersion = null;
+    this.serdeName = null;
+  }
+
+  public SchemaVersionDescriptor getSchemaVersion() {
+    return this.schemaVersion;
+  }
+
+  public void setSchemaVersion(SchemaVersionDescriptor schemaVersion) {
+    this.schemaVersion = schemaVersion;
+  }
+
+  public void unsetSchemaVersion() {
+    this.schemaVersion = null;
+  }
+
+  /** Returns true if field schemaVersion is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaVersion() {
+    return this.schemaVersion != null;
+  }
+
+  public void setSchemaVersionIsSet(boolean value) {
+    if (!value) {
+      this.schemaVersion = null;
+    }
+  }
+
+  public String getSerdeName() {
+    return this.serdeName;
+  }
+
+  public void setSerdeName(String serdeName) {
+    this.serdeName = serdeName;
+  }
+
+  public void unsetSerdeName() {
+    this.serdeName = null;
+  }
+
+  /** Returns true if field serdeName is set (has been assigned a value) and false otherwise */
+  public boolean isSetSerdeName() {
+    return this.serdeName != null;
+  }
+
+  public void setSerdeNameIsSet(boolean value) {
+    if (!value) {
+      this.serdeName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SCHEMA_VERSION:
+      if (value == null) {
+        unsetSchemaVersion();
+      } else {
+        setSchemaVersion((SchemaVersionDescriptor)value);
+      }
+      break;
+
+    case SERDE_NAME:
+      if (value == null) {
+        unsetSerdeName();
+      } else {
+        setSerdeName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SCHEMA_VERSION:
+      return getSchemaVersion();
+
+    case SERDE_NAME:
+      return getSerdeName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SCHEMA_VERSION:
+      return isSetSchemaVersion();
+    case SERDE_NAME:
+      return isSetSerdeName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof MapSchemaVersionToSerdeRequest)
+      return this.equals((MapSchemaVersionToSerdeRequest)that);
+    return false;
+  }
+
+  public boolean equals(MapSchemaVersionToSerdeRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_schemaVersion = true && this.isSetSchemaVersion();
+    boolean that_present_schemaVersion = true && that.isSetSchemaVersion();
+    if (this_present_schemaVersion || that_present_schemaVersion) {
+      if (!(this_present_schemaVersion && that_present_schemaVersion))
+        return false;
+      if (!this.schemaVersion.equals(that.schemaVersion))
+        return false;
+    }
+
+    boolean this_present_serdeName = true && this.isSetSerdeName();
+    boolean that_present_serdeName = true && that.isSetSerdeName();
+    if (this_present_serdeName || that_present_serdeName) {
+      if (!(this_present_serdeName && that_present_serdeName))
+        return false;
+      if (!this.serdeName.equals(that.serdeName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_schemaVersion = true && (isSetSchemaVersion());
+    list.add(present_schemaVersion);
+    if (present_schemaVersion)
+      list.add(schemaVersion);
+
+    boolean present_serdeName = true && (isSetSerdeName());
+    list.add(present_serdeName);
+    if (present_serdeName)
+      list.add(serdeName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(MapSchemaVersionToSerdeRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSchemaVersion()).compareTo(other.isSetSchemaVersion());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaVersion()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaVersion, other.schemaVersion);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSerdeName()).compareTo(other.isSetSerdeName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSerdeName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.serdeName, other.serdeName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("MapSchemaVersionToSerdeRequest(");
+    boolean first = true;
+
+    sb.append("schemaVersion:");
+    if (this.schemaVersion == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.schemaVersion);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("serdeName:");
+    if (this.serdeName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.serdeName);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (schemaVersion != null) {
+      schemaVersion.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class MapSchemaVersionToSerdeRequestStandardSchemeFactory implements SchemeFactory {
+    public MapSchemaVersionToSerdeRequestStandardScheme getScheme() {
+      return new MapSchemaVersionToSerdeRequestStandardScheme();
+    }
+  }
+
+  private static class MapSchemaVersionToSerdeRequestStandardScheme extends StandardScheme<MapSchemaVersionToSerdeRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, MapSchemaVersionToSerdeRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SCHEMA_VERSION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.schemaVersion = new SchemaVersionDescriptor();
+              struct.schemaVersion.read(iprot);
+              struct.setSchemaVersionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // SERDE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.serdeName = iprot.readString();
+              struct.setSerdeNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, MapSchemaVersionToSerdeRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.schemaVersion != null) {
+        oprot.writeFieldBegin(SCHEMA_VERSION_FIELD_DESC);
+        struct.schemaVersion.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.serdeName != null) {
+        oprot.writeFieldBegin(SERDE_NAME_FIELD_DESC);
+        oprot.writeString(struct.serdeName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class MapSchemaVersionToSerdeRequestTupleSchemeFactory implements SchemeFactory {
+    public MapSchemaVersionToSerdeRequestTupleScheme getScheme() {
+      return new MapSchemaVersionToSerdeRequestTupleScheme();
+    }
+  }
+
+  private static class MapSchemaVersionToSerdeRequestTupleScheme extends TupleScheme<MapSchemaVersionToSerdeRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, MapSchemaVersionToSerdeRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSchemaVersion()) {
+        optionals.set(0);
+      }
+      if (struct.isSetSerdeName()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetSchemaVersion()) {
+        struct.schemaVersion.write(oprot);
+      }
+      if (struct.isSetSerdeName()) {
+        oprot.writeString(struct.serdeName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, MapSchemaVersionToSerdeRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.schemaVersion = new SchemaVersionDescriptor();
+        struct.schemaVersion.read(iprot);
+        struct.setSchemaVersionIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.serdeName = iprot.readString();
+        struct.setSerdeNameIsSet(true);
+      }
+    }
+  }
+
+}
+
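
MapSchemaVersionToSerdeRequest is a plain two-field bean: the StandardScheme above reads and writes it field by field, while the TupleScheme packs presence flags into a BitSet ahead of the values. A minimal round-trip sketch using libthrift's TSerializer/TDeserializer (the SerDe class name and the unpopulated descriptor are illustrative assumptions):

    import org.apache.hadoop.hive.metastore.api.MapSchemaVersionToSerdeRequest;
    import org.apache.hadoop.hive.metastore.api.SchemaVersionDescriptor;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TBinaryProtocol;

    public class MapRequestRoundTrip {
      public static void main(String[] args) throws Exception {
        SchemaVersionDescriptor version = new SchemaVersionDescriptor();
        // populate the descriptor per its own struct definition (omitted here)
        MapSchemaVersionToSerdeRequest request = new MapSchemaVersionToSerdeRequest(
            version, "org.apache.hadoop.hive.serde2.avro.AvroSerDe");  // assumed SerDe name
        // Binary-protocol round trip; equals() compares field by field, as generated above.
        byte[] wire = new TSerializer(new TBinaryProtocol.Factory()).serialize(request);
        MapSchemaVersionToSerdeRequest copy = new MapSchemaVersionToSerdeRequest();
        new TDeserializer(new TBinaryProtocol.Factory()).deserialize(copy, wire);
        System.out.println(copy.equals(request));  // true
      }
    }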

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
new file mode 100644
index 0000000..3510995
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Materialization.java
@@ -0,0 +1,750 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Materialization implements org.apache.thrift.TBase<Materialization, Materialization._Fields>, java.io.Serializable, Cloneable, Comparable<Materialization> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Materialization");
+
+  private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)1);
+  private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField INVALIDATION_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("invalidationTime", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField SOURCE_TABLES_UPDATE_DELETE_MODIFIED_FIELD_DESC = new org.apache.thrift.protocol.TField("sourceTablesUpdateDeleteModified", org.apache.thrift.protocol.TType.BOOL, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new MaterializationStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new MaterializationTupleSchemeFactory());
+  }
+
+  private Set<String> tablesUsed; // required
+  private String validTxnList; // optional
+  private long invalidationTime; // optional
+  private boolean sourceTablesUpdateDeleteModified; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TABLES_USED((short)1, "tablesUsed"),
+    VALID_TXN_LIST((short)2, "validTxnList"),
+    INVALIDATION_TIME((short)3, "invalidationTime"),
+    SOURCE_TABLES_UPDATE_DELETE_MODIFIED((short)4, "sourceTablesUpdateDeleteModified");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TABLES_USED
+          return TABLES_USED;
+        case 2: // VALID_TXN_LIST
+          return VALID_TXN_LIST;
+        case 3: // INVALIDATION_TIME
+          return INVALIDATION_TIME;
+        case 4: // SOURCE_TABLES_UPDATE_DELETE_MODIFIED
+          return SOURCE_TABLES_UPDATE_DELETE_MODIFIED;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __INVALIDATIONTIME_ISSET_ID = 0;
+  private static final int __SOURCETABLESUPDATEDELETEMODIFIED_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.VALID_TXN_LIST,_Fields.INVALIDATION_TIME,_Fields.SOURCE_TABLES_UPDATE_DELETE_MODIFIED};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TABLES_USED, new org.apache.thrift.meta_data.FieldMetaData("tablesUsed", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.INVALIDATION_TIME, new org.apache.thrift.meta_data.FieldMetaData("invalidationTime", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.SOURCE_TABLES_UPDATE_DELETE_MODIFIED, new org.apache.thrift.meta_data.FieldMetaData("sourceTablesUpdateDeleteModified", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Materialization.class, metaDataMap);
+  }
+
+  public Materialization() {
+  }
+
+  public Materialization(
+    Set<String> tablesUsed)
+  {
+    this();
+    this.tablesUsed = tablesUsed;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public Materialization(Materialization other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetTablesUsed()) {
+      Set<String> __this__tablesUsed = new HashSet<String>(other.tablesUsed);
+      this.tablesUsed = __this__tablesUsed;
+    }
+    if (other.isSetValidTxnList()) {
+      this.validTxnList = other.validTxnList;
+    }
+    this.invalidationTime = other.invalidationTime;
+    this.sourceTablesUpdateDeleteModified = other.sourceTablesUpdateDeleteModified;
+  }
+
+  public Materialization deepCopy() {
+    return new Materialization(this);
+  }
+
+  @Override
+  public void clear() {
+    this.tablesUsed = null;
+    this.validTxnList = null;
+    setInvalidationTimeIsSet(false);
+    this.invalidationTime = 0;
+    setSourceTablesUpdateDeleteModifiedIsSet(false);
+    this.sourceTablesUpdateDeleteModified = false;
+  }
+
+  public int getTablesUsedSize() {
+    return (this.tablesUsed == null) ? 0 : this.tablesUsed.size();
+  }
+
+  public java.util.Iterator<String> getTablesUsedIterator() {
+    return (this.tablesUsed == null) ? null : this.tablesUsed.iterator();
+  }
+
+  public void addToTablesUsed(String elem) {
+    if (this.tablesUsed == null) {
+      this.tablesUsed = new HashSet<String>();
+    }
+    this.tablesUsed.add(elem);
+  }
+
+  public Set<String> getTablesUsed() {
+    return this.tablesUsed;
+  }
+
+  public void setTablesUsed(Set<String> tablesUsed) {
+    this.tablesUsed = tablesUsed;
+  }
+
+  public void unsetTablesUsed() {
+    this.tablesUsed = null;
+  }
+
+  /** Returns true if field tablesUsed is set (has been assigned a value) and false otherwise */
+  public boolean isSetTablesUsed() {
+    return this.tablesUsed != null;
+  }
+
+  public void setTablesUsedIsSet(boolean value) {
+    if (!value) {
+      this.tablesUsed = null;
+    }
+  }
+
+  public String getValidTxnList() {
+    return this.validTxnList;
+  }
+
+  public void setValidTxnList(String validTxnList) {
+    this.validTxnList = validTxnList;
+  }
+
+  public void unsetValidTxnList() {
+    this.validTxnList = null;
+  }
+
+  /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidTxnList() {
+    return this.validTxnList != null;
+  }
+
+  public void setValidTxnListIsSet(boolean value) {
+    if (!value) {
+      this.validTxnList = null;
+    }
+  }
+
+  public long getInvalidationTime() {
+    return this.invalidationTime;
+  }
+
+  public void setInvalidationTime(long invalidationTime) {
+    this.invalidationTime = invalidationTime;
+    setInvalidationTimeIsSet(true);
+  }
+
+  public void unsetInvalidationTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __INVALIDATIONTIME_ISSET_ID);
+  }
+
+  /** Returns true if field invalidationTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetInvalidationTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __INVALIDATIONTIME_ISSET_ID);
+  }
+
+  public void setInvalidationTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __INVALIDATIONTIME_ISSET_ID, value);
+  }
+
+  public boolean isSourceTablesUpdateDeleteModified() {
+    return this.sourceTablesUpdateDeleteModified;
+  }
+
+  public void setSourceTablesUpdateDeleteModified(boolean sourceTablesUpdateDeleteModified) {
+    this.sourceTablesUpdateDeleteModified = sourceTablesUpdateDeleteModified;
+    setSourceTablesUpdateDeleteModifiedIsSet(true);
+  }
+
+  public void unsetSourceTablesUpdateDeleteModified() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SOURCETABLESUPDATEDELETEMODIFIED_ISSET_ID);
+  }
+
+  /** Returns true if field sourceTablesUpdateDeleteModified is set (has been assigned a value) and false otherwise */
+  public boolean isSetSourceTablesUpdateDeleteModified() {
+    return EncodingUtils.testBit(__isset_bitfield, __SOURCETABLESUPDATEDELETEMODIFIED_ISSET_ID);
+  }
+
+  public void setSourceTablesUpdateDeleteModifiedIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SOURCETABLESUPDATEDELETEMODIFIED_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TABLES_USED:
+      if (value == null) {
+        unsetTablesUsed();
+      } else {
+        setTablesUsed((Set<String>)value);
+      }
+      break;
+
+    case VALID_TXN_LIST:
+      if (value == null) {
+        unsetValidTxnList();
+      } else {
+        setValidTxnList((String)value);
+      }
+      break;
+
+    case INVALIDATION_TIME:
+      if (value == null) {
+        unsetInvalidationTime();
+      } else {
+        setInvalidationTime((Long)value);
+      }
+      break;
+
+    case SOURCE_TABLES_UPDATE_DELETE_MODIFIED:
+      if (value == null) {
+        unsetSourceTablesUpdateDeleteModified();
+      } else {
+        setSourceTablesUpdateDeleteModified((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TABLES_USED:
+      return getTablesUsed();
+
+    case VALID_TXN_LIST:
+      return getValidTxnList();
+
+    case INVALIDATION_TIME:
+      return getInvalidationTime();
+
+    case SOURCE_TABLES_UPDATE_DELETE_MODIFIED:
+      return isSourceTablesUpdateDeleteModified();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TABLES_USED:
+      return isSetTablesUsed();
+    case VALID_TXN_LIST:
+      return isSetValidTxnList();
+    case INVALIDATION_TIME:
+      return isSetInvalidationTime();
+    case SOURCE_TABLES_UPDATE_DELETE_MODIFIED:
+      return isSetSourceTablesUpdateDeleteModified();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof Materialization)
+      return this.equals((Materialization)that);
+    return false;
+  }
+
+  public boolean equals(Materialization that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_tablesUsed = true && this.isSetTablesUsed();
+    boolean that_present_tablesUsed = true && that.isSetTablesUsed();
+    if (this_present_tablesUsed || that_present_tablesUsed) {
+      if (!(this_present_tablesUsed && that_present_tablesUsed))
+        return false;
+      if (!this.tablesUsed.equals(that.tablesUsed))
+        return false;
+    }
+
+    boolean this_present_validTxnList = true && this.isSetValidTxnList();
+    boolean that_present_validTxnList = true && that.isSetValidTxnList();
+    if (this_present_validTxnList || that_present_validTxnList) {
+      if (!(this_present_validTxnList && that_present_validTxnList))
+        return false;
+      if (!this.validTxnList.equals(that.validTxnList))
+        return false;
+    }
+
+    boolean this_present_invalidationTime = true && this.isSetInvalidationTime();
+    boolean that_present_invalidationTime = true && that.isSetInvalidationTime();
+    if (this_present_invalidationTime || that_present_invalidationTime) {
+      if (!(this_present_invalidationTime && that_present_invalidationTime))
+        return false;
+      if (this.invalidationTime != that.invalidationTime)
+        return false;
+    }
+
+    boolean this_present_sourceTablesUpdateDeleteModified = true && this.isSetSourceTablesUpdateDeleteModified();
+    boolean that_present_sourceTablesUpdateDeleteModified = true && that.isSetSourceTablesUpdateDeleteModified();
+    if (this_present_sourceTablesUpdateDeleteModified || that_present_sourceTablesUpdateDeleteModified) {
+      if (!(this_present_sourceTablesUpdateDeleteModified && that_present_sourceTablesUpdateDeleteModified))
+        return false;
+      if (this.sourceTablesUpdateDeleteModified != that.sourceTablesUpdateDeleteModified)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_tablesUsed = true && (isSetTablesUsed());
+    list.add(present_tablesUsed);
+    if (present_tablesUsed)
+      list.add(tablesUsed);
+
+    boolean present_validTxnList = true && (isSetValidTxnList());
+    list.add(present_validTxnList);
+    if (present_validTxnList)
+      list.add(validTxnList);
+
+    boolean present_invalidationTime = true && (isSetInvalidationTime());
+    list.add(present_invalidationTime);
+    if (present_invalidationTime)
+      list.add(invalidationTime);
+
+    boolean present_sourceTablesUpdateDeleteModified = true && (isSetSourceTablesUpdateDeleteModified());
+    list.add(present_sourceTablesUpdateDeleteModified);
+    if (present_sourceTablesUpdateDeleteModified)
+      list.add(sourceTablesUpdateDeleteModified);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(Materialization other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTablesUsed()).compareTo(other.isSetTablesUsed());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTablesUsed()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablesUsed, other.tablesUsed);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidTxnList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetInvalidationTime()).compareTo(other.isSetInvalidationTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetInvalidationTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.invalidationTime, other.invalidationTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSourceTablesUpdateDeleteModified()).compareTo(other.isSetSourceTablesUpdateDeleteModified());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSourceTablesUpdateDeleteModified()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sourceTablesUpdateDeleteModified, other.sourceTablesUpdateDeleteModified);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Materialization(");
+    boolean first = true;
+
+    sb.append("tablesUsed:");
+    if (this.tablesUsed == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tablesUsed);
+    }
+    first = false;
+    if (isSetValidTxnList()) {
+      if (!first) sb.append(", ");
+      sb.append("validTxnList:");
+      if (this.validTxnList == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.validTxnList);
+      }
+      first = false;
+    }
+    if (isSetInvalidationTime()) {
+      if (!first) sb.append(", ");
+      sb.append("invalidationTime:");
+      sb.append(this.invalidationTime);
+      first = false;
+    }
+    if (isSetSourceTablesUpdateDeleteModified()) {
+      if (!first) sb.append(", ");
+      sb.append("sourceTablesUpdateDeleteModified:");
+      sb.append(this.sourceTablesUpdateDeleteModified);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTablesUsed()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tablesUsed' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class MaterializationStandardSchemeFactory implements SchemeFactory {
+    public MaterializationStandardScheme getScheme() {
+      return new MaterializationStandardScheme();
+    }
+  }
+
+  private static class MaterializationStandardScheme extends StandardScheme<Materialization> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Materialization struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TABLES_USED
+            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
+              {
+                org.apache.thrift.protocol.TSet _set864 = iprot.readSetBegin();
+                struct.tablesUsed = new HashSet<String>(2*_set864.size);
+                String _elem865;
+                for (int _i866 = 0; _i866 < _set864.size; ++_i866)
+                {
+                  _elem865 = iprot.readString();
+                  struct.tablesUsed.add(_elem865);
+                }
+                iprot.readSetEnd();
+              }
+              struct.setTablesUsedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // VALID_TXN_LIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.validTxnList = iprot.readString();
+              struct.setValidTxnListIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // INVALIDATION_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.invalidationTime = iprot.readI64();
+              struct.setInvalidationTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // SOURCE_TABLES_UPDATE_DELETE_MODIFIED
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.sourceTablesUpdateDeleteModified = iprot.readBool();
+              struct.setSourceTablesUpdateDeleteModifiedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Materialization struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.tablesUsed != null) {
+        oprot.writeFieldBegin(TABLES_USED_FIELD_DESC);
+        {
+          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size()));
+          for (String _iter867 : struct.tablesUsed)
+          {
+            oprot.writeString(_iter867);
+          }
+          oprot.writeSetEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.validTxnList != null) {
+        if (struct.isSetValidTxnList()) {
+          oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC);
+          oprot.writeString(struct.validTxnList);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetInvalidationTime()) {
+        oprot.writeFieldBegin(INVALIDATION_TIME_FIELD_DESC);
+        oprot.writeI64(struct.invalidationTime);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetSourceTablesUpdateDeleteModified()) {
+        oprot.writeFieldBegin(SOURCE_TABLES_UPDATE_DELETE_MODIFIED_FIELD_DESC);
+        oprot.writeBool(struct.sourceTablesUpdateDeleteModified);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class MaterializationTupleSchemeFactory implements SchemeFactory {
+    public MaterializationTupleScheme getScheme() {
+      return new MaterializationTupleScheme();
+    }
+  }
+
+  private static class MaterializationTupleScheme extends TupleScheme<Materialization> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, Materialization struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.tablesUsed.size());
+        for (String _iter868 : struct.tablesUsed)
+        {
+          oprot.writeString(_iter868);
+        }
+      }
+      BitSet optionals = new BitSet();
+      if (struct.isSetValidTxnList()) {
+        optionals.set(0);
+      }
+      if (struct.isSetInvalidationTime()) {
+        optionals.set(1);
+      }
+      if (struct.isSetSourceTablesUpdateDeleteModified()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetValidTxnList()) {
+        oprot.writeString(struct.validTxnList);
+      }
+      if (struct.isSetInvalidationTime()) {
+        oprot.writeI64(struct.invalidationTime);
+      }
+      if (struct.isSetSourceTablesUpdateDeleteModified()) {
+        oprot.writeBool(struct.sourceTablesUpdateDeleteModified);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, Materialization struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TSet _set869 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.tablesUsed = new HashSet<String>(2*_set869.size);
+        String _elem870;
+        for (int _i871 = 0; _i871 < _set869.size; ++_i871)
+        {
+          _elem870 = iprot.readString();
+          struct.tablesUsed.add(_elem870);
+        }
+      }
+      struct.setTablesUsedIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.validTxnList = iprot.readString();
+        struct.setValidTxnListIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.invalidationTime = iprot.readI64();
+        struct.setInvalidationTimeIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.sourceTablesUpdateDeleteModified = iprot.readBool();
+        struct.setSourceTablesUpdateDeleteModifiedIsSet(true);
+      }
+    }
+  }
+
+}
+
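
Materialization is the struct the materialized-view rewrite path consults for staleness: tablesUsed is the only REQUIRED field (validate() rejects a struct without it), while the three optionals describe the snapshot. Note how the two primitive optionals, invalidationTime and sourceTablesUpdateDeleteModified, track presence through bits in __isset_bitfield instead of null checks. A short usage sketch against the generated API (field values are illustrative only):

    import org.apache.hadoop.hive.metastore.api.Materialization;

    public class MaterializationSketch {
      public static void main(String[] args) throws Exception {
        Materialization m = new Materialization();
        m.addToTablesUsed("default.src");      // required; validate() enforces it
        m.setValidTxnList("txn-snapshot");     // illustrative ValidTxnList string
        m.setInvalidationTime(0L);             // sets a bit in __isset_bitfield

        m.validate();                          // passes once tablesUsed is set
        System.out.println(m.isSetSourceTablesUpdateDeleteModified()); // false
        System.out.println(m);                 // toString() prints only set optionals
      }
    }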

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
new file mode 100644
index 0000000..b699a0d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetaException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class MetaException extends TException implements org.apache.thrift.TBase<MetaException, MetaException._Fields>, java.io.Serializable, Cloneable, Comparable<MetaException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MetaException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new MetaExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new MetaExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MetaException.class, metaDataMap);
+  }
+
+  public MetaException() {
+  }
+
+  public MetaException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public MetaException(MetaException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public MetaException deepCopy() {
+    return new MetaException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof MetaException)
+      return this.equals((MetaException)that);
+    return false;
+  }
+
+  public boolean equals(MetaException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(MetaException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("MetaException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class MetaExceptionStandardSchemeFactory implements SchemeFactory {
+    public MetaExceptionStandardScheme getScheme() {
+      return new MetaExceptionStandardScheme();
+    }
+  }
+
+  private static class MetaExceptionStandardScheme extends StandardScheme<MetaException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, MetaException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, MetaException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class MetaExceptionTupleSchemeFactory implements SchemeFactory {
+    public MetaExceptionTupleScheme getScheme() {
+      return new MetaExceptionTupleScheme();
+    }
+  }
+
+  private static class MetaExceptionTupleScheme extends TupleScheme<MetaException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, MetaException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, MetaException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
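
MetaException is the metastore's catch-all error type; because it extends TException, the generated service stubs can throw it across the wire and clients catch it like any other Java exception, with getMessage() returning the Thrift 'message' field (field id 1). A hedged sketch of typical client-side handling (lookupTable is a placeholder, not a real metastore API):

    import org.apache.hadoop.hive.metastore.api.MetaException;

    public class MetaExceptionSketch {
      // Placeholder for a metastore client call that can fail server-side.
      static void lookupTable(String db, String tbl) throws MetaException {
        throw new MetaException("Table " + db + "." + tbl + " not found");
      }

      public static void main(String[] args) {
        try {
          lookupTable("default", "missing_tbl");
        } catch (MetaException e) {
          System.err.println("metastore error: " + e.getMessage());
        }
      }
    }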

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetadataPpdResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetadataPpdResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetadataPpdResult.java
new file mode 100644
index 0000000..0679ff4
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/MetadataPpdResult.java
@@ -0,0 +1,517 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class MetadataPpdResult implements org.apache.thrift.TBase<MetadataPpdResult, MetadataPpdResult._Fields>, java.io.Serializable, Cloneable, Comparable<MetadataPpdResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("MetadataPpdResult");
+
+  private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField INCLUDE_BITSET_FIELD_DESC = new org.apache.thrift.protocol.TField("includeBitset", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new MetadataPpdResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new MetadataPpdResultTupleSchemeFactory());
+  }
+
+  private ByteBuffer metadata; // optional
+  private ByteBuffer includeBitset; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    METADATA((short)1, "metadata"),
+    INCLUDE_BITSET((short)2, "includeBitset");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // METADATA
+          return METADATA;
+        case 2: // INCLUDE_BITSET
+          return INCLUDE_BITSET;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.METADATA,_Fields.INCLUDE_BITSET};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    tmpMap.put(_Fields.INCLUDE_BITSET, new org.apache.thrift.meta_data.FieldMetaData("includeBitset", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(MetadataPpdResult.class, metaDataMap);
+  }
+
+  public MetadataPpdResult() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public MetadataPpdResult(MetadataPpdResult other) {
+    if (other.isSetMetadata()) {
+      this.metadata = org.apache.thrift.TBaseHelper.copyBinary(other.metadata);
+    }
+    if (other.isSetIncludeBitset()) {
+      this.includeBitset = org.apache.thrift.TBaseHelper.copyBinary(other.includeBitset);
+    }
+  }
+
+  public MetadataPpdResult deepCopy() {
+    return new MetadataPpdResult(this);
+  }
+
+  @Override
+  public void clear() {
+    this.metadata = null;
+    this.includeBitset = null;
+  }
+
+  public byte[] getMetadata() {
+    setMetadata(org.apache.thrift.TBaseHelper.rightSize(metadata));
+    return metadata == null ? null : metadata.array();
+  }
+
+  public ByteBuffer bufferForMetadata() {
+    return org.apache.thrift.TBaseHelper.copyBinary(metadata);
+  }
+
+  public void setMetadata(byte[] metadata) {
+    this.metadata = metadata == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(metadata, metadata.length));
+  }
+
+  public void setMetadata(ByteBuffer metadata) {
+    this.metadata = org.apache.thrift.TBaseHelper.copyBinary(metadata);
+  }
+
+  public void unsetMetadata() {
+    this.metadata = null;
+  }
+
+  /** Returns true if field metadata is set (has been assigned a value) and false otherwise */
+  public boolean isSetMetadata() {
+    return this.metadata != null;
+  }
+
+  public void setMetadataIsSet(boolean value) {
+    if (!value) {
+      this.metadata = null;
+    }
+  }
+
+  public byte[] getIncludeBitset() {
+    setIncludeBitset(org.apache.thrift.TBaseHelper.rightSize(includeBitset));
+    return includeBitset == null ? null : includeBitset.array();
+  }
+
+  public ByteBuffer bufferForIncludeBitset() {
+    return org.apache.thrift.TBaseHelper.copyBinary(includeBitset);
+  }
+
+  public void setIncludeBitset(byte[] includeBitset) {
+    this.includeBitset = includeBitset == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(includeBitset, includeBitset.length));
+  }
+
+  public void setIncludeBitset(ByteBuffer includeBitset) {
+    this.includeBitset = org.apache.thrift.TBaseHelper.copyBinary(includeBitset);
+  }
+
+  public void unsetIncludeBitset() {
+    this.includeBitset = null;
+  }
+
+  /** Returns true if field includeBitset is set (has been assigned a value) and false otherwise */
+  public boolean isSetIncludeBitset() {
+    return this.includeBitset != null;
+  }
+
+  public void setIncludeBitsetIsSet(boolean value) {
+    if (!value) {
+      this.includeBitset = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case METADATA:
+      if (value == null) {
+        unsetMetadata();
+      } else {
+        setMetadata((ByteBuffer)value);
+      }
+      break;
+
+    case INCLUDE_BITSET:
+      if (value == null) {
+        unsetIncludeBitset();
+      } else {
+        setIncludeBitset((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case METADATA:
+      return getMetadata();
+
+    case INCLUDE_BITSET:
+      return getIncludeBitset();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case METADATA:
+      return isSetMetadata();
+    case INCLUDE_BITSET:
+      return isSetIncludeBitset();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof MetadataPpdResult)
+      return this.equals((MetadataPpdResult)that);
+    return false;
+  }
+
+  public boolean equals(MetadataPpdResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_metadata = true && this.isSetMetadata();
+    boolean that_present_metadata = true && that.isSetMetadata();
+    if (this_present_metadata || that_present_metadata) {
+      if (!(this_present_metadata && that_present_metadata))
+        return false;
+      if (!this.metadata.equals(that.metadata))
+        return false;
+    }
+
+    boolean this_present_includeBitset = true && this.isSetIncludeBitset();
+    boolean that_present_includeBitset = true && that.isSetIncludeBitset();
+    if (this_present_includeBitset || that_present_includeBitset) {
+      if (!(this_present_includeBitset && that_present_includeBitset))
+        return false;
+      if (!this.includeBitset.equals(that.includeBitset))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_metadata = true && (isSetMetadata());
+    list.add(present_metadata);
+    if (present_metadata)
+      list.add(metadata);
+
+    boolean present_includeBitset = true && (isSetIncludeBitset());
+    list.add(present_includeBitset);
+    if (present_includeBitset)
+      list.add(includeBitset);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(MetadataPpdResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMetadata()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetIncludeBitset()).compareTo(other.isSetIncludeBitset());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIncludeBitset()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.includeBitset, other.includeBitset);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("MetadataPpdResult(");
+    boolean first = true;
+
+    if (isSetMetadata()) {
+      sb.append("metadata:");
+      if (this.metadata == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.metadata, sb);
+      }
+      first = false;
+    }
+    if (isSetIncludeBitset()) {
+      if (!first) sb.append(", ");
+      sb.append("includeBitset:");
+      if (this.includeBitset == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.includeBitset, sb);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class MetadataPpdResultStandardSchemeFactory implements SchemeFactory {
+    public MetadataPpdResultStandardScheme getScheme() {
+      return new MetadataPpdResultStandardScheme();
+    }
+  }
+
+  private static class MetadataPpdResultStandardScheme extends StandardScheme<MetadataPpdResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, MetadataPpdResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // METADATA
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.metadata = iprot.readBinary();
+              struct.setMetadataIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // INCLUDE_BITSET
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.includeBitset = iprot.readBinary();
+              struct.setIncludeBitsetIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, MetadataPpdResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.metadata != null) {
+        if (struct.isSetMetadata()) {
+          oprot.writeFieldBegin(METADATA_FIELD_DESC);
+          oprot.writeBinary(struct.metadata);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.includeBitset != null) {
+        if (struct.isSetIncludeBitset()) {
+          oprot.writeFieldBegin(INCLUDE_BITSET_FIELD_DESC);
+          oprot.writeBinary(struct.includeBitset);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class MetadataPpdResultTupleSchemeFactory implements SchemeFactory {
+    public MetadataPpdResultTupleScheme getScheme() {
+      return new MetadataPpdResultTupleScheme();
+    }
+  }
+
+  private static class MetadataPpdResultTupleScheme extends TupleScheme<MetadataPpdResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, MetadataPpdResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMetadata()) {
+        optionals.set(0);
+      }
+      if (struct.isSetIncludeBitset()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetMetadata()) {
+        oprot.writeBinary(struct.metadata);
+      }
+      if (struct.isSetIncludeBitset()) {
+        oprot.writeBinary(struct.includeBitset);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, MetadataPpdResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.metadata = iprot.readBinary();
+        struct.setMetadataIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.includeBitset = iprot.readBinary();
+        struct.setIncludeBitsetIsSet(true);
+      }
+    }
+  }
+
+}
+
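
For reference, a minimal round-trip sketch for the MetadataPpdResult struct above (not part of this commit; it assumes the generated org.apache.hadoop.hive.metastore.api classes and the Thrift 0.9.3 runtime are on the classpath, and the wrapper class name and payload bytes are made up for illustration):

import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TMemoryBuffer;
import java.nio.charset.StandardCharsets;

public class MetadataPpdResultRoundTrip {
  public static void main(String[] args) throws Exception {
    MetadataPpdResult src = new MetadataPpdResult();
    // Both fields are optional binaries; write() only emits the ones that
    // are set (see the isSetMetadata()/isSetIncludeBitset() guards above).
    src.setMetadata("file-footer-bytes".getBytes(StandardCharsets.UTF_8));

    TMemoryBuffer buf = new TMemoryBuffer(256);  // in-memory transport
    src.write(new TCompactProtocol(buf));        // StandardScheme write path

    MetadataPpdResult dst = new MetadataPpdResult();
    dst.read(new TCompactProtocol(buf));         // StandardScheme read path
    System.out.println(dst);                     // prints via toString() above
  }
}

Because validate() is a no-op here (no required fields), a struct with neither field set also round-trips cleanly as an empty struct.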


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java
new file mode 100644
index 0000000..69378c9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsRequest.java
@@ -0,0 +1,591 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DefaultConstraintsRequest implements org.apache.thrift.TBase<DefaultConstraintsRequest, DefaultConstraintsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<DefaultConstraintsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DefaultConstraintsRequest");
+
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DefaultConstraintsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DefaultConstraintsRequestTupleSchemeFactory());
+  }
+
+  private String catName; // required
+  private String db_name; // required
+  private String tbl_name; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CAT_NAME((short)1, "catName"),
+    DB_NAME((short)2, "db_name"),
+    TBL_NAME((short)3, "tbl_name");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CAT_NAME
+          return CAT_NAME;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // TBL_NAME
+          return TBL_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DefaultConstraintsRequest.class, metaDataMap);
+  }
+
+  public DefaultConstraintsRequest() {
+  }
+
+  public DefaultConstraintsRequest(
+    String catName,
+    String db_name,
+    String tbl_name)
+  {
+    this();
+    this.catName = catName;
+    this.db_name = db_name;
+    this.tbl_name = tbl_name;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DefaultConstraintsRequest(DefaultConstraintsRequest other) {
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetDb_name()) {
+      this.db_name = other.db_name;
+    }
+    if (other.isSetTbl_name()) {
+      this.tbl_name = other.tbl_name;
+    }
+  }
+
+  public DefaultConstraintsRequest deepCopy() {
+    return new DefaultConstraintsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catName = null;
+    this.db_name = null;
+    this.tbl_name = null;
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getDb_name() {
+    return this.db_name;
+  }
+
+  public void setDb_name(String db_name) {
+    this.db_name = db_name;
+  }
+
+  public void unsetDb_name() {
+    this.db_name = null;
+  }
+
+  /** Returns true if field db_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetDb_name() {
+    return this.db_name != null;
+  }
+
+  public void setDb_nameIsSet(boolean value) {
+    if (!value) {
+      this.db_name = null;
+    }
+  }
+
+  public String getTbl_name() {
+    return this.tbl_name;
+  }
+
+  public void setTbl_name(String tbl_name) {
+    this.tbl_name = tbl_name;
+  }
+
+  public void unsetTbl_name() {
+    this.tbl_name = null;
+  }
+
+  /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetTbl_name() {
+    return this.tbl_name != null;
+  }
+
+  public void setTbl_nameIsSet(boolean value) {
+    if (!value) {
+      this.tbl_name = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDb_name();
+      } else {
+        setDb_name((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTbl_name();
+      } else {
+        setTbl_name((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CAT_NAME:
+      return getCatName();
+
+    case DB_NAME:
+      return getDb_name();
+
+    case TBL_NAME:
+      return getTbl_name();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CAT_NAME:
+      return isSetCatName();
+    case DB_NAME:
+      return isSetDb_name();
+    case TBL_NAME:
+      return isSetTbl_name();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DefaultConstraintsRequest)
+      return this.equals((DefaultConstraintsRequest)that);
+    return false;
+  }
+
+  public boolean equals(DefaultConstraintsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_db_name = true && this.isSetDb_name();
+    boolean that_present_db_name = true && that.isSetDb_name();
+    if (this_present_db_name || that_present_db_name) {
+      if (!(this_present_db_name && that_present_db_name))
+        return false;
+      if (!this.db_name.equals(that.db_name))
+        return false;
+    }
+
+    boolean this_present_tbl_name = true && this.isSetTbl_name();
+    boolean that_present_tbl_name = true && that.isSetTbl_name();
+    if (this_present_tbl_name || that_present_tbl_name) {
+      if (!(this_present_tbl_name && that_present_tbl_name))
+        return false;
+      if (!this.tbl_name.equals(that.tbl_name))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_db_name = true && (isSetDb_name());
+    list.add(present_db_name);
+    if (present_db_name)
+      list.add(db_name);
+
+    boolean present_tbl_name = true && (isSetTbl_name());
+    list.add(present_tbl_name);
+    if (present_tbl_name)
+      list.add(tbl_name);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DefaultConstraintsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDb_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTbl_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DefaultConstraintsRequest(");
+    boolean first = true;
+
+    sb.append("catName:");
+    if (this.catName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("db_name:");
+    if (this.db_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.db_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tbl_name:");
+    if (this.tbl_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tbl_name);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetCatName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDb_name()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTbl_name()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tbl_name' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DefaultConstraintsRequestStandardSchemeFactory implements SchemeFactory {
+    public DefaultConstraintsRequestStandardScheme getScheme() {
+      return new DefaultConstraintsRequestStandardScheme();
+    }
+  }
+
+  private static class DefaultConstraintsRequestStandardScheme extends StandardScheme<DefaultConstraintsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.db_name = iprot.readString();
+              struct.setDb_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tbl_name = iprot.readString();
+              struct.setTbl_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catName != null) {
+        oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+        oprot.writeString(struct.catName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.db_name != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.db_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tbl_name != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tbl_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DefaultConstraintsRequestTupleSchemeFactory implements SchemeFactory {
+    public DefaultConstraintsRequestTupleScheme getScheme() {
+      return new DefaultConstraintsRequestTupleScheme();
+    }
+  }
+
+  private static class DefaultConstraintsRequestTupleScheme extends TupleScheme<DefaultConstraintsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.catName);
+      oprot.writeString(struct.db_name);
+      oprot.writeString(struct.tbl_name);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.catName = iprot.readString();
+      struct.setCatNameIsSet(true);
+      struct.db_name = iprot.readString();
+      struct.setDb_nameIsSet(true);
+      struct.tbl_name = iprot.readString();
+      struct.setTbl_nameIsSet(true);
+    }
+  }
+
+}
+
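
For reference, a quick sketch of the required-field contract that validate() enforces in the DefaultConstraintsRequest above (not part of this commit; it assumes the generated classes and Thrift 0.9.3 runtime are available, and the catalog/database/table names are placeholders):

import org.apache.hadoop.hive.metastore.api.DefaultConstraintsRequest;
import org.apache.thrift.TException;

public class DefaultConstraintsRequestValidateDemo {
  public static void main(String[] args) throws TException {
    // All three fields are REQUIRED (see metaDataMap above), so the
    // three-argument constructor is the normal way to build a request.
    DefaultConstraintsRequest ok =
        new DefaultConstraintsRequest("hive", "default", "orders");
    ok.validate();  // passes: catName, db_name and tbl_name are all set

    try {
      new DefaultConstraintsRequest().validate();  // nothing set
    } catch (TException te) {
      // TProtocolException: Required field 'catName' is unset! Struct:...
      System.out.println(te.getMessage());
    }
  }
}

Note that the TupleScheme above writes all three strings unconditionally, so serializing an unvalidated request with unset fields would fail; validate() is the guard that turns that into a clear TProtocolException first.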

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java
new file mode 100644
index 0000000..47b8d1c
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DefaultConstraintsResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DefaultConstraintsResponse implements org.apache.thrift.TBase<DefaultConstraintsResponse, DefaultConstraintsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<DefaultConstraintsResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DefaultConstraintsResponse");
+
+  private static final org.apache.thrift.protocol.TField DEFAULT_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultConstraints", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DefaultConstraintsResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DefaultConstraintsResponseTupleSchemeFactory());
+  }
+
+  private List<SQLDefaultConstraint> defaultConstraints; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DEFAULT_CONSTRAINTS((short)1, "defaultConstraints");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DEFAULT_CONSTRAINTS
+          return DEFAULT_CONSTRAINTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DEFAULT_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("defaultConstraints", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLDefaultConstraint.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DefaultConstraintsResponse.class, metaDataMap);
+  }
+
+  public DefaultConstraintsResponse() {
+  }
+
+  public DefaultConstraintsResponse(
+    List<SQLDefaultConstraint> defaultConstraints)
+  {
+    this();
+    this.defaultConstraints = defaultConstraints;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DefaultConstraintsResponse(DefaultConstraintsResponse other) {
+    if (other.isSetDefaultConstraints()) {
+      List<SQLDefaultConstraint> __this__defaultConstraints = new ArrayList<SQLDefaultConstraint>(other.defaultConstraints.size());
+      for (SQLDefaultConstraint other_element : other.defaultConstraints) {
+        __this__defaultConstraints.add(new SQLDefaultConstraint(other_element));
+      }
+      this.defaultConstraints = __this__defaultConstraints;
+    }
+  }
+
+  public DefaultConstraintsResponse deepCopy() {
+    return new DefaultConstraintsResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.defaultConstraints = null;
+  }
+
+  public int getDefaultConstraintsSize() {
+    return (this.defaultConstraints == null) ? 0 : this.defaultConstraints.size();
+  }
+
+  public java.util.Iterator<SQLDefaultConstraint> getDefaultConstraintsIterator() {
+    return (this.defaultConstraints == null) ? null : this.defaultConstraints.iterator();
+  }
+
+  public void addToDefaultConstraints(SQLDefaultConstraint elem) {
+    if (this.defaultConstraints == null) {
+      this.defaultConstraints = new ArrayList<SQLDefaultConstraint>();
+    }
+    this.defaultConstraints.add(elem);
+  }
+
+  public List<SQLDefaultConstraint> getDefaultConstraints() {
+    return this.defaultConstraints;
+  }
+
+  public void setDefaultConstraints(List<SQLDefaultConstraint> defaultConstraints) {
+    this.defaultConstraints = defaultConstraints;
+  }
+
+  public void unsetDefaultConstraints() {
+    this.defaultConstraints = null;
+  }
+
+  /** Returns true if field defaultConstraints is set (has been assigned a value) and false otherwise */
+  public boolean isSetDefaultConstraints() {
+    return this.defaultConstraints != null;
+  }
+
+  public void setDefaultConstraintsIsSet(boolean value) {
+    if (!value) {
+      this.defaultConstraints = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DEFAULT_CONSTRAINTS:
+      if (value == null) {
+        unsetDefaultConstraints();
+      } else {
+        setDefaultConstraints((List<SQLDefaultConstraint>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DEFAULT_CONSTRAINTS:
+      return getDefaultConstraints();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DEFAULT_CONSTRAINTS:
+      return isSetDefaultConstraints();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DefaultConstraintsResponse)
+      return this.equals((DefaultConstraintsResponse)that);
+    return false;
+  }
+
+  public boolean equals(DefaultConstraintsResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_defaultConstraints = true && this.isSetDefaultConstraints();
+    boolean that_present_defaultConstraints = true && that.isSetDefaultConstraints();
+    if (this_present_defaultConstraints || that_present_defaultConstraints) {
+      if (!(this_present_defaultConstraints && that_present_defaultConstraints))
+        return false;
+      if (!this.defaultConstraints.equals(that.defaultConstraints))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_defaultConstraints = true && (isSetDefaultConstraints());
+    list.add(present_defaultConstraints);
+    if (present_defaultConstraints)
+      list.add(defaultConstraints);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DefaultConstraintsResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDefaultConstraints()).compareTo(other.isSetDefaultConstraints());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDefaultConstraints()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.defaultConstraints, other.defaultConstraints);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DefaultConstraintsResponse(");
+    boolean first = true;
+
+    sb.append("defaultConstraints:");
+    if (this.defaultConstraints == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.defaultConstraints);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDefaultConstraints()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'defaultConstraints' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DefaultConstraintsResponseStandardSchemeFactory implements SchemeFactory {
+    public DefaultConstraintsResponseStandardScheme getScheme() {
+      return new DefaultConstraintsResponseStandardScheme();
+    }
+  }
+
+  private static class DefaultConstraintsResponseStandardScheme extends StandardScheme<DefaultConstraintsResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DEFAULT_CONSTRAINTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list352 = iprot.readListBegin();
+                struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list352.size);
+                SQLDefaultConstraint _elem353;
+                for (int _i354 = 0; _i354 < _list352.size; ++_i354)
+                {
+                  _elem353 = new SQLDefaultConstraint();
+                  _elem353.read(iprot);
+                  struct.defaultConstraints.add(_elem353);
+                }
+                iprot.readListEnd();
+              }
+              struct.setDefaultConstraintsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.defaultConstraints != null) {
+        oprot.writeFieldBegin(DEFAULT_CONSTRAINTS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.defaultConstraints.size()));
+          for (SQLDefaultConstraint _iter355 : struct.defaultConstraints)
+          {
+            _iter355.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DefaultConstraintsResponseTupleSchemeFactory implements SchemeFactory {
+    public DefaultConstraintsResponseTupleScheme getScheme() {
+      return new DefaultConstraintsResponseTupleScheme();
+    }
+  }
+
+  private static class DefaultConstraintsResponseTupleScheme extends TupleScheme<DefaultConstraintsResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.defaultConstraints.size());
+        for (SQLDefaultConstraint _iter356 : struct.defaultConstraints)
+        {
+          _iter356.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DefaultConstraintsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list357 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.defaultConstraints = new ArrayList<SQLDefaultConstraint>(_list357.size);
+        SQLDefaultConstraint _elem358;
+        for (int _i359 = 0; _i359 < _list357.size; ++_i359)
+        {
+          _elem358 = new SQLDefaultConstraint();
+          _elem358.read(iprot);
+          struct.defaultConstraints.add(_elem358);
+        }
+      }
+      struct.setDefaultConstraintsIsSet(true);
+    }
+  }
+
+}
+
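
For reference, a small sketch of the list-field conveniences and deep-copy semantics in the DefaultConstraintsResponse above (not part of this commit; it assumes the generated classes are on the classpath and uses only the no-arg SQLDefaultConstraint constructor, since that struct's fields are defined elsewhere):

import org.apache.hadoop.hive.metastore.api.DefaultConstraintsResponse;
import org.apache.hadoop.hive.metastore.api.SQLDefaultConstraint;

public class DefaultConstraintsResponseCopyDemo {
  public static void main(String[] args) {
    DefaultConstraintsResponse resp = new DefaultConstraintsResponse();
    resp.addToDefaultConstraints(new SQLDefaultConstraint()); // creates the backing list lazily
    System.out.println(resp.getDefaultConstraintsSize());     // 1

    // The copy constructor clones each element, so the copy is independent
    // of the original list but still compares equal by value.
    DefaultConstraintsResponse copy = resp.deepCopy();
    System.out.println(copy.equals(resp));                                         // true
    System.out.println(copy.getDefaultConstraints() == resp.getDefaultConstraints()); // false
  }
}

Since defaultConstraints is the struct's only field and is REQUIRED, validate() rejects a response whose list was never set, though an explicitly set empty list is accepted.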

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
new file mode 100644
index 0000000..ba0ac29
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DoubleColumnStatsData.java
@@ -0,0 +1,799 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DoubleColumnStatsData implements org.apache.thrift.TBase<DoubleColumnStatsData, DoubleColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<DoubleColumnStatsData> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DoubleColumnStatsData");
+
+  private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.DOUBLE, (short)1);
+  private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("highValue", org.apache.thrift.protocol.TType.DOUBLE, (short)2);
+  private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField("numDVs", org.apache.thrift.protocol.TType.I64, (short)4);
+  private static final org.apache.thrift.protocol.TField BIT_VECTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("bitVectors", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DoubleColumnStatsDataStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DoubleColumnStatsDataTupleSchemeFactory());
+  }
+
+  private double lowValue; // optional
+  private double highValue; // optional
+  private long numNulls; // required
+  private long numDVs; // required
+  private ByteBuffer bitVectors; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    LOW_VALUE((short)1, "lowValue"),
+    HIGH_VALUE((short)2, "highValue"),
+    NUM_NULLS((short)3, "numNulls"),
+    NUM_DVS((short)4, "numDVs"),
+    BIT_VECTORS((short)5, "bitVectors");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // LOW_VALUE
+          return LOW_VALUE;
+        case 2: // HIGH_VALUE
+          return HIGH_VALUE;
+        case 3: // NUM_NULLS
+          return NUM_NULLS;
+        case 4: // NUM_DVS
+          return NUM_DVS;
+        case 5: // BIT_VECTORS
+          return BIT_VECTORS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __LOWVALUE_ISSET_ID = 0;
+  private static final int __HIGHVALUE_ISSET_ID = 1;
+  private static final int __NUMNULLS_ISSET_ID = 2;
+  private static final int __NUMDVS_ISSET_ID = 3;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.LOW_VALUE,_Fields.HIGH_VALUE,_Fields.BIT_VECTORS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.LOW_VALUE, new org.apache.thrift.meta_data.FieldMetaData("lowValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.HIGH_VALUE, new org.apache.thrift.meta_data.FieldMetaData("highValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.DOUBLE)));
+    tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.NUM_DVS, new org.apache.thrift.meta_data.FieldMetaData("numDVs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.BIT_VECTORS, new org.apache.thrift.meta_data.FieldMetaData("bitVectors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DoubleColumnStatsData.class, metaDataMap);
+  }
+
+  public DoubleColumnStatsData() {
+  }
+
+  public DoubleColumnStatsData(
+    long numNulls,
+    long numDVs)
+  {
+    this();
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+    this.numDVs = numDVs;
+    setNumDVsIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DoubleColumnStatsData(DoubleColumnStatsData other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.lowValue = other.lowValue;
+    this.highValue = other.highValue;
+    this.numNulls = other.numNulls;
+    this.numDVs = other.numDVs;
+    if (other.isSetBitVectors()) {
+      this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(other.bitVectors);
+    }
+  }
+
+  public DoubleColumnStatsData deepCopy() {
+    return new DoubleColumnStatsData(this);
+  }
+
+  @Override
+  public void clear() {
+    setLowValueIsSet(false);
+    this.lowValue = 0.0;
+    setHighValueIsSet(false);
+    this.highValue = 0.0;
+    setNumNullsIsSet(false);
+    this.numNulls = 0;
+    setNumDVsIsSet(false);
+    this.numDVs = 0;
+    this.bitVectors = null;
+  }
+
+  public double getLowValue() {
+    return this.lowValue;
+  }
+
+  public void setLowValue(double lowValue) {
+    this.lowValue = lowValue;
+    setLowValueIsSet(true);
+  }
+
+  public void unsetLowValue() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOWVALUE_ISSET_ID);
+  }
+
+  /** Returns true if field lowValue is set (has been assigned a value) and false otherwise */
+  public boolean isSetLowValue() {
+    return EncodingUtils.testBit(__isset_bitfield, __LOWVALUE_ISSET_ID);
+  }
+
+  public void setLowValueIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOWVALUE_ISSET_ID, value);
+  }
+
+  public double getHighValue() {
+    return this.highValue;
+  }
+
+  public void setHighValue(double highValue) {
+    this.highValue = highValue;
+    setHighValueIsSet(true);
+  }
+
+  public void unsetHighValue() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HIGHVALUE_ISSET_ID);
+  }
+
+  /** Returns true if field highValue is set (has been assigned a value) and false otherwise */
+  public boolean isSetHighValue() {
+    return EncodingUtils.testBit(__isset_bitfield, __HIGHVALUE_ISSET_ID);
+  }
+
+  public void setHighValueIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HIGHVALUE_ISSET_ID, value);
+  }
+
+  public long getNumNulls() {
+    return this.numNulls;
+  }
+
+  public void setNumNulls(long numNulls) {
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+  }
+
+  public void unsetNumNulls() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumNulls() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  public void setNumNullsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value);
+  }
+
+  public long getNumDVs() {
+    return this.numDVs;
+  }
+
+  public void setNumDVs(long numDVs) {
+    this.numDVs = numDVs;
+    setNumDVsIsSet(true);
+  }
+
+  public void unsetNumDVs() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMDVS_ISSET_ID);
+  }
+
+  /** Returns true if field numDVs is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumDVs() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID);
+  }
+
+  public void setNumDVsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMDVS_ISSET_ID, value);
+  }
+
+  public byte[] getBitVectors() {
+    setBitVectors(org.apache.thrift.TBaseHelper.rightSize(bitVectors));
+    return bitVectors == null ? null : bitVectors.array();
+  }
+
+  public ByteBuffer bufferForBitVectors() {
+    return org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void setBitVectors(byte[] bitVectors) {
+    this.bitVectors = bitVectors == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(bitVectors, bitVectors.length));
+  }
+
+  public void setBitVectors(ByteBuffer bitVectors) {
+    this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void unsetBitVectors() {
+    this.bitVectors = null;
+  }
+
+  /** Returns true if field bitVectors is set (has been assigned a value) and false otherwise */
+  public boolean isSetBitVectors() {
+    return this.bitVectors != null;
+  }
+
+  public void setBitVectorsIsSet(boolean value) {
+    if (!value) {
+      this.bitVectors = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case LOW_VALUE:
+      if (value == null) {
+        unsetLowValue();
+      } else {
+        setLowValue((Double)value);
+      }
+      break;
+
+    case HIGH_VALUE:
+      if (value == null) {
+        unsetHighValue();
+      } else {
+        setHighValue((Double)value);
+      }
+      break;
+
+    case NUM_NULLS:
+      if (value == null) {
+        unsetNumNulls();
+      } else {
+        setNumNulls((Long)value);
+      }
+      break;
+
+    case NUM_DVS:
+      if (value == null) {
+        unsetNumDVs();
+      } else {
+        setNumDVs((Long)value);
+      }
+      break;
+
+    case BIT_VECTORS:
+      if (value == null) {
+        unsetBitVectors();
+      } else {
+        setBitVectors((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case LOW_VALUE:
+      return getLowValue();
+
+    case HIGH_VALUE:
+      return getHighValue();
+
+    case NUM_NULLS:
+      return getNumNulls();
+
+    case NUM_DVS:
+      return getNumDVs();
+
+    case BIT_VECTORS:
+      return getBitVectors();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case LOW_VALUE:
+      return isSetLowValue();
+    case HIGH_VALUE:
+      return isSetHighValue();
+    case NUM_NULLS:
+      return isSetNumNulls();
+    case NUM_DVS:
+      return isSetNumDVs();
+    case BIT_VECTORS:
+      return isSetBitVectors();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DoubleColumnStatsData)
+      return this.equals((DoubleColumnStatsData)that);
+    return false;
+  }
+
+  public boolean equals(DoubleColumnStatsData that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_lowValue = true && this.isSetLowValue();
+    boolean that_present_lowValue = true && that.isSetLowValue();
+    if (this_present_lowValue || that_present_lowValue) {
+      if (!(this_present_lowValue && that_present_lowValue))
+        return false;
+      if (this.lowValue != that.lowValue)
+        return false;
+    }
+
+    boolean this_present_highValue = true && this.isSetHighValue();
+    boolean that_present_highValue = true && that.isSetHighValue();
+    if (this_present_highValue || that_present_highValue) {
+      if (!(this_present_highValue && that_present_highValue))
+        return false;
+      if (this.highValue != that.highValue)
+        return false;
+    }
+
+    boolean this_present_numNulls = true;
+    boolean that_present_numNulls = true;
+    if (this_present_numNulls || that_present_numNulls) {
+      if (!(this_present_numNulls && that_present_numNulls))
+        return false;
+      if (this.numNulls != that.numNulls)
+        return false;
+    }
+
+    boolean this_present_numDVs = true;
+    boolean that_present_numDVs = true;
+    if (this_present_numDVs || that_present_numDVs) {
+      if (!(this_present_numDVs && that_present_numDVs))
+        return false;
+      if (this.numDVs != that.numDVs)
+        return false;
+    }
+
+    boolean this_present_bitVectors = true && this.isSetBitVectors();
+    boolean that_present_bitVectors = true && that.isSetBitVectors();
+    if (this_present_bitVectors || that_present_bitVectors) {
+      if (!(this_present_bitVectors && that_present_bitVectors))
+        return false;
+      if (!this.bitVectors.equals(that.bitVectors))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_lowValue = true && (isSetLowValue());
+    list.add(present_lowValue);
+    if (present_lowValue)
+      list.add(lowValue);
+
+    boolean present_highValue = true && (isSetHighValue());
+    list.add(present_highValue);
+    if (present_highValue)
+      list.add(highValue);
+
+    boolean present_numNulls = true;
+    list.add(present_numNulls);
+    if (present_numNulls)
+      list.add(numNulls);
+
+    boolean present_numDVs = true;
+    list.add(present_numDVs);
+    if (present_numDVs)
+      list.add(numDVs);
+
+    boolean present_bitVectors = true && (isSetBitVectors());
+    list.add(present_bitVectors);
+    if (present_bitVectors)
+      list.add(bitVectors);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DoubleColumnStatsData other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetLowValue()).compareTo(other.isSetLowValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLowValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lowValue, other.lowValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetHighValue()).compareTo(other.isSetHighValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHighValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highValue, other.highValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(other.isSetNumNulls());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumNulls()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, other.numNulls);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo(other.isSetNumDVs());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumDVs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numDVs, other.numDVs);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetBitVectors()).compareTo(other.isSetBitVectors());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetBitVectors()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bitVectors, other.bitVectors);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DoubleColumnStatsData(");
+    boolean first = true;
+
+    if (isSetLowValue()) {
+      sb.append("lowValue:");
+      sb.append(this.lowValue);
+      first = false;
+    }
+    if (isSetHighValue()) {
+      if (!first) sb.append(", ");
+      sb.append("highValue:");
+      sb.append(this.highValue);
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("numNulls:");
+    sb.append(this.numNulls);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("numDVs:");
+    sb.append(this.numDVs);
+    first = false;
+    if (isSetBitVectors()) {
+      if (!first) sb.append(", ");
+      sb.append("bitVectors:");
+      if (this.bitVectors == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.bitVectors, sb);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetNumNulls()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNumDVs()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numDVs' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DoubleColumnStatsDataStandardSchemeFactory implements SchemeFactory {
+    public DoubleColumnStatsDataStandardScheme getScheme() {
+      return new DoubleColumnStatsDataStandardScheme();
+    }
+  }
+
+  private static class DoubleColumnStatsDataStandardScheme extends StandardScheme<DoubleColumnStatsData> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DoubleColumnStatsData struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // LOW_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.lowValue = iprot.readDouble();
+              struct.setLowValueIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // HIGH_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.DOUBLE) {
+              struct.highValue = iprot.readDouble();
+              struct.setHighValueIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // NUM_NULLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numNulls = iprot.readI64();
+              struct.setNumNullsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // NUM_DVS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numDVs = iprot.readI64();
+              struct.setNumDVsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // BIT_VECTORS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.bitVectors = iprot.readBinary();
+              struct.setBitVectorsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DoubleColumnStatsData struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.isSetLowValue()) {
+        oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC);
+        oprot.writeDouble(struct.lowValue);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetHighValue()) {
+        oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC);
+        oprot.writeDouble(struct.highValue);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC);
+      oprot.writeI64(struct.numNulls);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(NUM_DVS_FIELD_DESC);
+      oprot.writeI64(struct.numDVs);
+      oprot.writeFieldEnd();
+      if (struct.bitVectors != null) {
+        if (struct.isSetBitVectors()) {
+          oprot.writeFieldBegin(BIT_VECTORS_FIELD_DESC);
+          oprot.writeBinary(struct.bitVectors);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DoubleColumnStatsDataTupleSchemeFactory implements SchemeFactory {
+    public DoubleColumnStatsDataTupleScheme getScheme() {
+      return new DoubleColumnStatsDataTupleScheme();
+    }
+  }
+
+  private static class DoubleColumnStatsDataTupleScheme extends TupleScheme<DoubleColumnStatsData> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DoubleColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.numNulls);
+      oprot.writeI64(struct.numDVs);
+      BitSet optionals = new BitSet();
+      if (struct.isSetLowValue()) {
+        optionals.set(0);
+      }
+      if (struct.isSetHighValue()) {
+        optionals.set(1);
+      }
+      if (struct.isSetBitVectors()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetLowValue()) {
+        oprot.writeDouble(struct.lowValue);
+      }
+      if (struct.isSetHighValue()) {
+        oprot.writeDouble(struct.highValue);
+      }
+      if (struct.isSetBitVectors()) {
+        oprot.writeBinary(struct.bitVectors);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DoubleColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.numNulls = iprot.readI64();
+      struct.setNumNullsIsSet(true);
+      struct.numDVs = iprot.readI64();
+      struct.setNumDVsIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.lowValue = iprot.readDouble();
+        struct.setLowValueIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.highValue = iprot.readDouble();
+        struct.setHighValueIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.bitVectors = iprot.readBinary();
+        struct.setBitVectorsIsSet(true);
+      }
+    }
+  }
+
+}
+

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropCatalogRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropCatalogRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropCatalogRequest.java
new file mode 100644
index 0000000..a11fe47
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropCatalogRequest.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DropCatalogRequest implements org.apache.thrift.TBase<DropCatalogRequest, DropCatalogRequest._Fields>, java.io.Serializable, Cloneable, Comparable<DropCatalogRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DropCatalogRequest");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DropCatalogRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DropCatalogRequestTupleSchemeFactory());
+  }
+
+  private String name; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NAME((short)1, "name");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NAME
+          return NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DropCatalogRequest.class, metaDataMap);
+  }
+
+  public DropCatalogRequest() {
+  }
+
+  public DropCatalogRequest(
+    String name)
+  {
+    this();
+    this.name = name;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DropCatalogRequest(DropCatalogRequest other) {
+    if (other.isSetName()) {
+      this.name = other.name;
+    }
+  }
+
+  public DropCatalogRequest deepCopy() {
+    return new DropCatalogRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.name = null;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NAME:
+      return getName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NAME:
+      return isSetName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DropCatalogRequest)
+      return this.equals((DropCatalogRequest)that);
+    return false;
+  }
+
+  public boolean equals(DropCatalogRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DropCatalogRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DropCatalogRequest(");
+    boolean first = true;
+
+    sb.append("name:");
+    if (this.name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.name);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DropCatalogRequestStandardSchemeFactory implements SchemeFactory {
+    public DropCatalogRequestStandardScheme getScheme() {
+      return new DropCatalogRequestStandardScheme();
+    }
+  }
+
+  private static class DropCatalogRequestStandardScheme extends StandardScheme<DropCatalogRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DropCatalogRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = iprot.readString();
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DropCatalogRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        oprot.writeString(struct.name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DropCatalogRequestTupleSchemeFactory implements SchemeFactory {
+    public DropCatalogRequestTupleScheme getScheme() {
+      return new DropCatalogRequestTupleScheme();
+    }
+  }
+
+  private static class DropCatalogRequestTupleScheme extends TupleScheme<DropCatalogRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DropCatalogRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetName()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DropCatalogRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.name = iprot.readString();
+        struct.setNameIsSet(true);
+      }
+    }
+  }
+
+}
+


[65/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
index 0000000,3785f89..4ef6786
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnHandler.java
@@@ -1,0 -1,4906 +1,4949 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import java.io.PrintWriter;
+ import java.nio.ByteBuffer;
+ import java.sql.Connection;
+ import java.sql.Driver;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.SQLFeatureNotSupportedException;
+ import java.sql.Savepoint;
+ import java.sql.Statement;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.BitSet;
+ import java.util.Calendar;
+ import java.util.Collections;
+ import java.util.Comparator;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Properties;
+ import java.util.Set;
+ import java.util.SortedSet;
+ import java.util.TimeZone;
+ import java.util.TreeSet;
+ import java.util.concurrent.ConcurrentHashMap;
+ import java.util.concurrent.Semaphore;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ import java.util.concurrent.locks.ReentrantLock;
+ import java.util.regex.Pattern;
+ 
+ import javax.sql.DataSource;
+ 
+ import org.apache.commons.lang.ArrayUtils;
+ import org.apache.commons.lang.NotImplementedException;
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ValidReadTxnList;
+ import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.DatabaseProduct;
+ import org.apache.hadoop.hive.metastore.MaterializationsInvalidationCache;
+ import org.apache.hadoop.hive.metastore.MaterializationsRebuildLockHandler;
+ import org.apache.hadoop.hive.metastore.Warehouse;
+ import org.apache.hadoop.hive.metastore.MetaStoreListenerNotifier;
+ import org.apache.hadoop.hive.metastore.TransactionalMetaStoreEventListener;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProvider;
+ import org.apache.hadoop.hive.metastore.datasource.DataSourceProviderFactory;
+ import org.apache.hadoop.hive.metastore.events.AbortTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.AllocWriteIdEvent;
+ import org.apache.hadoop.hive.metastore.events.CommitTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.OpenTxnEvent;
+ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.StringableMap;
+ import org.apache.hadoop.util.StringUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
++
+ import com.google.common.annotations.VisibleForTesting;
+ 
+ /**
+  * A handler to answer transaction related calls that come into the metastore
+  * server.
+  *
+  * Note on log messages:  Please include txnid:X and lockid info using
+  * {@link JavaUtils#txnIdToString(long)}
+  * and {@link JavaUtils#lockIdToString(long)} in all messages.
+  * The txnid:X and lockid:Y format matches how Thrift object toString() methods are generated,
+  * so keeping the format consistent makes grep'ing the logs much easier.
+  *
+  * Note on HIVE_LOCKS.hl_last_heartbeat.
+  * For locks that are part of a transaction, we set this to 0 (we would rather set it to NULL,
+  * but currently the DB schema has this NOT NULL) and only update/read the heartbeat from the
+  * corresponding transaction in TXNS.
+  *
+  * In general there can be multiple metastores where this logic can execute, thus the DB is
+  * used to ensure proper mutexing of operations.
+  * Select ... For Update (or equivalent: either MsSql with(updlock) or actual Update stmt) is
+  * used to properly sequence operations.  Most notably:
+  * 1. various sequence IDs are generated with aid of this mutex
+  * 2. ensuring that each (Hive) Transaction state is transitioned atomically.  Transaction state
+  *  includes its actual state (Open, Aborted) as well as its lock list/component list.  Thus all
+  *  per-transaction ops either start with an update/delete of the relevant TXNS row or do S4U on that row.
+  *  This allows almost all operations to run at READ_COMMITTED and minimizes DB deadlocks.
+  * 3. checkLock() - this is mutexed entirely since we must ensure that while we check if some lock
+  *  can be granted, no other (strictly speaking "earlier") lock can change state.
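+  *
+  * For illustration, the S4U read of the transaction-id sequence looks roughly like:
+  *   select ntxn_next from NEXT_TXN_ID for update
+  * (the exact SQL is produced by SQLGenerator and varies by database product).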
+  *
+  * The exception to this is Derby, which doesn't support proper S4U.  Derby always runs embedded
+  * (this is the only supported configuration for Derby)
+  * in the same JVM as HiveMetaStoreHandler, thus we use a JVM-wide lock to properly sequence the operations.
+  *
+  * {@link #derbyLock}
+  *
+  * If we ever decide to run a remote Derby server, according to
+  * https://db.apache.org/derby/docs/10.0/manuals/develop/develop78.html all transactions will be
+  * serialized, so that would also work, though it has not been tested.
+  *
+  * General design note:
+  * It's imperative that any operation on a txn (e.g. commit) ensures (atomically) that this txn is
+  * still valid and active.  In the code this is usually achieved at the same time the txn record
+  * is locked for some operation.
+  * 
+  * Note on retry logic:
+  * Metastore has retry logic in both {@link org.apache.hadoop.hive.metastore.RetryingMetaStoreClient}
+  * and {@link org.apache.hadoop.hive.metastore.RetryingHMSHandler}.  The retry logic there is very
+  * generic and is not aware of whether the operations are idempotent or not.  (This is separate from
+  * the retry logic here in TxnHandler, which can/does retry DB errors intelligently.)  The worst case is
+  * when an op here issues a successful commit against the RDBMS but the calling stack doesn't
+  * receive the ack and retries.  (If an op fails before commit, it's trivially idempotent.)
+  * Thus the ops here need to be made as idempotent as possible, or
+  * the metastore call stack should have logic not to retry.  There are {@link RetrySemantics}
+  * annotations to document the behavior.
+  */
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
+ abstract class TxnHandler implements TxnStore, TxnStore.MutexAPI {
+ 
+   static final protected char INITIATED_STATE = 'i';
+   static final protected char WORKING_STATE = 'w';
+   static final protected char READY_FOR_CLEANING = 'r';
+   static final char FAILED_STATE = 'f';
+   static final char SUCCEEDED_STATE = 's';
+   static final char ATTEMPTED_STATE = 'a';
+ 
+   // Compactor types
+   static final protected char MAJOR_TYPE = 'a';
+   static final protected char MINOR_TYPE = 'i';
+ 
+   // Transaction states
+   static final protected char TXN_ABORTED = 'a';
+   static final protected char TXN_OPEN = 'o';
+   //todo: make these like OperationType and remove the char constants above
+   enum TxnStatus {OPEN, ABORTED, COMMITTED, UNKNOWN}
+ 
+   public enum TxnType {
+     DEFAULT(0), REPL_CREATED(1), READ_ONLY(2);
+ 
+     private final int value;
+     TxnType(int value) {
+       this.value = value;
+     }
+ 
+     public int getValue() {
+       return value;
+     }
+   }
+ 
+   // Lock states
+   static final protected char LOCK_ACQUIRED = 'a';
+   static final protected char LOCK_WAITING = 'w';
+ 
+   // Lock types
+   static final protected char LOCK_EXCLUSIVE = 'e';
+   static final protected char LOCK_SHARED = 'r';
+   static final protected char LOCK_SEMI_SHARED = 'w';
+ 
+   static final private int ALLOWED_REPEATED_DEADLOCKS = 10;
+   static final private Logger LOG = LoggerFactory.getLogger(TxnHandler.class.getName());
+ 
+   static private DataSource connPool;
+   private static DataSource connPoolMutex;
+   static private boolean doRetryOnConnPool = false;
+ 
+   private List<TransactionalMetaStoreEventListener> transactionalListeners;
+   
+   private enum OpertaionType {
+     SELECT('s'), INSERT('i'), UPDATE('u'), DELETE('d');
+     private final char sqlConst;
+     OpertaionType(char sqlConst) {
+       this.sqlConst = sqlConst;
+     }
+     public String toString() {
+       return Character.toString(sqlConst);
+     }
+     public static OpertaionType fromString(char sqlConst) {
+       switch (sqlConst) {
+         case 's':
+           return SELECT;
+         case 'i':
+           return INSERT;
+         case 'u':
+           return UPDATE;
+         case 'd':
+           return DELETE;
+         default:
+           throw new IllegalArgumentException(quoteChar(sqlConst));
+       }
+     }
+     public static OpertaionType fromDataOperationType(DataOperationType dop) {
+       switch (dop) {
+         case SELECT:
+           return OpertaionType.SELECT;
+         case INSERT:
+           return OpertaionType.INSERT;
+         case UPDATE:
+           return OpertaionType.UPDATE;
+         case DELETE:
+           return OpertaionType.DELETE;
+         default:
+           throw new IllegalArgumentException("Unexpected value: " + dop);
+       }
+     }
+   }
+ 
+   // Maximum number of open transactions that's allowed
+   private static volatile int maxOpenTxns = 0;
+   // Whether number of open transactions reaches the threshold
+   private static volatile boolean tooManyOpenTxns = false;
+ 
+   /**
+    * Number of consecutive deadlocks we have seen
+    */
+   private int deadlockCnt;
+   private long deadlockRetryInterval;
+   protected Configuration conf;
+   private static DatabaseProduct dbProduct;
+   private static SQLGenerator sqlGenerator;
+ 
+   // (End user) Transaction timeout, in milliseconds.
+   private long timeout;
+ 
+   private String identifierQuoteString; // quotes to use for quoting tables, where necessary
+   private long retryInterval;
+   private int retryLimit;
+   private int retryNum;
+   // Current number of open txns
+   private AtomicInteger numOpenTxns;
+ 
+   /**
+    * Derby specific concurrency control
+    */
+   private static final ReentrantLock derbyLock = new ReentrantLock(true);
+   /**
+    * Must be static since even in unit tests there may be > 1 instance of TxnHandler
+    * (e.g. via Compactor services)
+    */
+   private final static ConcurrentHashMap<String, Semaphore> derbyKey2Lock = new ConcurrentHashMap<>();
+   private static final String hostname = JavaUtils.hostname();
+ 
+   // Private methods should never catch SQLException and then throw MetaException.  The public
+   // methods depend on SQLException coming back so they can detect and handle deadlocks.  Private
+   // methods should only throw MetaException when they explicitly know there's a logic error and
+   // they want to throw past the public methods.
+   //
+   // All public methods that write to the database have to check for deadlocks when a SQLException
+   // comes back and handle it if they see one.  This has to be done with the connection pooling
+   // in mind.  To do this they should call checkRetryable() AFTER rolling back the db transaction,
+   // and then they should catch RetryException and call themselves recursively. See commitTxn for an example.
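+   //
+   // A minimal sketch of that pattern (illustrative only: someOp(), doWork() and
+   // SomeRequest are placeholders, not actual members of this class):
+   //
+   //   public void someOp(SomeRequest rqst) throws MetaException {
+   //     try {
+   //       Connection dbConn = null;
+   //       try {
+   //         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+   //         doWork(dbConn, rqst);
+   //         LOG.debug("Going to commit");
+   //         dbConn.commit();
+   //       } catch (SQLException e) {
+   //         rollbackDBConn(dbConn);                             // roll back FIRST
+   //         checkRetryable(dbConn, e, "someOp(" + rqst + ")");  // may throw RetryException
+   //         throw new MetaException("Unable to update transaction database "
+   //           + StringUtils.stringifyException(e));
+   //       } finally {
+   //         closeDbConn(dbConn);
+   //       }
+   //     } catch (RetryException e) {
+   //       someOp(rqst);  // retry the whole operation recursively
+   //     }
+   //   }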
+ 
+   public TxnHandler() {
+   }
+ 
+   /**
+    * This is logically part of c'tor and must be called prior to any other method.
+    * Not physically part of c'tor due to use of reflection
+    */
+   public void setConf(Configuration conf) {
+     this.conf = conf;
+ 
+     checkQFileTestHack();
+ 
+     synchronized (TxnHandler.class) {
+       if (connPool == null) {
+         Connection dbConn = null;
+         // Set up the JDBC connection pool
+         try {
+           int maxPoolSize = MetastoreConf.getIntVar(conf, ConfVars.CONNECTION_POOLING_MAX_CONNECTIONS);
+           long getConnectionTimeoutMs = 30000;
+           connPool = setupJdbcConnectionPool(conf, maxPoolSize, getConnectionTimeoutMs);
+           /*the mutex pools should ideally be somewhat larger since some operations require 1
+            connection from each pool and we want to avoid taking a connection from primary pool
+            and then blocking because mutex pool is empty.  There is only 1 thread in any HMS trying
+            to mutex on each MUTEX_KEY except MUTEX_KEY.CheckLock.  The CheckLock operation gets a
+            connection from connPool first, then connPoolMutex.  All others, go in the opposite
+            order (not very elegant...).  So number of connection requests for connPoolMutex cannot
+            exceed (size of connPool + MUTEX_KEY.values().length - 1).*/
+           connPoolMutex = setupJdbcConnectionPool(conf, maxPoolSize + MUTEX_KEY.values().length, getConnectionTimeoutMs);
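+           // Worked example under assumed values: if maxPoolSize = 10 and MUTEX_KEY has,
+           // say, 9 values, peak demand on connPoolMutex is at most 10 + 9 - 1 = 18
+           // connections (per the note above), while this pool is sized 10 + 9 = 19,
+           // so mutex requests cannot exhaust it.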
+           dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+           determineDatabaseProduct(dbConn);
+           sqlGenerator = new SQLGenerator(dbProduct, conf);
+         } catch (SQLException e) {
+           String msg = "Unable to instantiate JDBC connection pooling, " + e.getMessage();
+           LOG.error(msg);
+           throw new RuntimeException(e);
+         } finally {
+           closeDbConn(dbConn);
+         }
+       }
+     }
+ 
+     numOpenTxns = Metrics.getOrCreateGauge(MetricsConstants.NUM_OPEN_TXNS);
+ 
+     timeout = MetastoreConf.getTimeVar(conf, ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS);
+     buildJumpTable();
+     retryInterval = MetastoreConf.getTimeVar(conf, ConfVars.HMS_HANDLER_INTERVAL,
+         TimeUnit.MILLISECONDS);
+     retryLimit = MetastoreConf.getIntVar(conf, ConfVars.HMS_HANDLER_ATTEMPTS);
+     deadlockRetryInterval = retryInterval / 10;
+     maxOpenTxns = MetastoreConf.getIntVar(conf, ConfVars.MAX_OPEN_TXNS);
+ 
+     try {
+       transactionalListeners = MetaStoreUtils.getMetaStoreListeners(
+               TransactionalMetaStoreEventListener.class,
+                       conf, MetastoreConf.getVar(conf, ConfVars.TRANSACTIONAL_EVENT_LISTENERS));
+     } catch(MetaException e) {
+       String msg = "Unable to get transaction listeners, " + e.getMessage();
+       LOG.error(msg);
+       throw new RuntimeException(e);
+     }
+   }
+ 
+   @Override
+   public Configuration getConf() {
+     return conf;
+   }
+ 
+   @Override
+   @RetrySemantics.ReadOnly
+   public GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException {
+     try {
+       // We need to figure out the current transaction number and the list of
+       // open transactions.  To avoid needing a transaction on the underlying
+       // database we'll look at the current transaction number first.  If it
+       // subsequently shows up in the open list that's ok.
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         /**
+          * This method can run at READ_COMMITTED as long as
+          * {@link #openTxns(org.apache.hadoop.hive.metastore.api.OpenTxnRequest)} is atomic.
+          * More specifically, as long as advancing TransactionID in NEXT_TXN_ID is atomic with
+          * adding corresponding entries into TXNS.  The reason is that any txnid below HWM
+          * is either in TXNS and thus considered open (Open/Aborted) or it's considered Committed.
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select ntxn_next - 1 from NEXT_TXN_ID";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, no record found in next_txn_id");
+         }
+         long hwm = rs.getLong(1);
+         if (rs.wasNull()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, null record found in next_txn_id");
+         }
+         close(rs);
+         List<TxnInfo> txnInfos = new ArrayList<>();
+         //need the WHERE clause below to ensure consistent results with READ_COMMITTED
+         s = "select txn_id, txn_state, txn_user, txn_host, txn_started, txn_last_heartbeat from " +
+             "TXNS where txn_id <= " + hwm;
+         LOG.debug("Going to execute query<" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           char c = rs.getString(2).charAt(0);
+           TxnState state;
+           switch (c) {
+             case TXN_ABORTED:
+               state = TxnState.ABORTED;
+               break;
+ 
+             case TXN_OPEN:
+               state = TxnState.OPEN;
+               break;
+ 
+             default:
+               throw new MetaException("Unexpected transaction state " + c +
+                 " found in txns table");
+           }
+           TxnInfo txnInfo = new TxnInfo(rs.getLong(1), state, rs.getString(3), rs.getString(4));
+           txnInfo.setStartedTime(rs.getLong(5));
+           txnInfo.setLastHeartbeatTime(rs.getLong(6));
+           txnInfos.add(txnInfo);
+         }
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         return new GetOpenTxnsInfoResponse(hwm, txnInfos);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getOpenTxnsInfo");
+         throw new MetaException("Unable to select from transaction database: " + getMessage(e)
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return getOpenTxnsInfo();
+     }
+   }
++
+   @Override
+   @RetrySemantics.ReadOnly
+   public GetOpenTxnsResponse getOpenTxns() throws MetaException {
+     try {
+       // We need to figure out the current transaction number and the list of
+       // open transactions.  To avoid needing a transaction on the underlying
+       // database we'll look at the current transaction number first.  If it
+       // subsequently shows up in the open list that's ok.
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         /**
+          * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()}
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select ntxn_next - 1 from NEXT_TXN_ID";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, no record found in next_txn_id");
+         }
+         long hwm = rs.getLong(1);
+         if (rs.wasNull()) {
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, null record found in next_txn_id");
+         }
+         close(rs);
+         List<Long> openList = new ArrayList<>();
+         //need the WHERE clause below to ensure consistent results with READ_COMMITTED
+         s = "select txn_id, txn_state from TXNS where txn_id <= " + hwm + " order by txn_id";
+         LOG.debug("Going to execute query<" + s + ">");
+         rs = stmt.executeQuery(s);
+         long minOpenTxn = Long.MAX_VALUE;
+         BitSet abortedBits = new BitSet();
+         while (rs.next()) {
+           long txnId = rs.getLong(1);
+           openList.add(txnId);
+           char c = rs.getString(2).charAt(0);
+           if(c == TXN_OPEN) {
+             minOpenTxn = Math.min(minOpenTxn, txnId);
+           } else if (c == TXN_ABORTED) {
+             abortedBits.set(openList.size() - 1);
+           }
+         }
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
+         GetOpenTxnsResponse otr = new GetOpenTxnsResponse(hwm, openList, byteBuffer);
+         if(minOpenTxn < Long.MAX_VALUE) {
+           otr.setMin_open_txn(minOpenTxn);
+         }
+         return otr;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getOpenTxns");
+         throw new MetaException("Unable to select from transaction database, "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return getOpenTxns();
+     }
+   }
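+   // An illustrative client-side sketch (assuming the Thrift-generated accessors
+   // getOpen_txns()/getAbortedBits(); not part of this class): the aborted flags
+   // packed above can be recovered with java.util.BitSet, e.g.
+   //
+   //   GetOpenTxnsResponse rsp = txnStore.getOpenTxns();
+   //   List<Long> open = rsp.getOpen_txns();
+   //   BitSet aborted = BitSet.valueOf(rsp.getAbortedBits());
+   //   boolean isAborted = aborted.get(open.indexOf(someTxnId));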
+ 
+   /**
+    * Retry-by-caller note:
+    * Worst case, it will leave an open txn which will timeout.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException {
+     if (!tooManyOpenTxns && numOpenTxns.get() >= maxOpenTxns) {
+       tooManyOpenTxns = true;
+     }
+     if (tooManyOpenTxns) {
+       if (numOpenTxns.get() < maxOpenTxns * 0.9) {
+         tooManyOpenTxns = false;
+       } else {
+         LOG.warn("Maximum allowed number of open transactions (" + maxOpenTxns + ") has been " +
+             "reached. Current number of open transactions: " + numOpenTxns);
+         throw new MetaException("Maximum allowed number of open transactions has been reached. " +
+             "See hive.max.open.txns.");
+       }
+     }
+ 
+     int numTxns = rqst.getNum_txns();
+     if (numTxns <= 0) {
+       throw new MetaException("Invalid input for number of txns: " + numTxns);
+     }
+ 
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         lockInternal();
+         /**
+          * To make {@link #getOpenTxns()}/{@link #getOpenTxnsInfo()} work correctly, this operation must ensure
+          * that advancing the counter in NEXT_TXN_ID and adding appropriate entries to TXNS is atomic.
+          * Also, advancing the counter must work when multiple metastores are running.
+          * SELECT ... FOR UPDATE is used to prevent
+          * concurrent DB transactions being rolled back due to Write-Write conflict on NEXT_TXN_ID.
+          *
+          * In the current design, there can be several metastore instances running in a given Warehouse.
+          * This makes ideas like reserving a range of IDs to save trips to DB impossible.  For example,
+          * a client may go to MS1 and start a transaction with ID 500 to update a particular row.
+          * Now the same client will start another transaction, except it ends up on MS2 and may get
+          * transaction ID 400 and update the same row.  Now the merge that happens to materialize the snapshot
+          * on read will think the version of the row from transaction ID 500 is the latest one.
+          *
+          * Longer term we can consider running Active-Passive MS (at least wrt to ACID operations).  This
+          * set could support a write-through cache for added performance.
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         // Make sure the user has not requested an insane amount of txns.
+         int maxTxns = MetastoreConf.getIntVar(conf, ConfVars.TXN_MAX_OPEN_BATCH);
+         if (numTxns > maxTxns) numTxns = maxTxns;
+ 
+         stmt = dbConn.createStatement();
+         List<Long> txnIds = openTxns(dbConn, stmt, rqst);
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return new OpenTxnsResponse(txnIds);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "openTxns(" + rqst + ")");
+         throw new MetaException("Unable to select from transaction database "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return openTxns(rqst);
+     }
+   }
+ 
+   private List<Long> openTxns(Connection dbConn, Statement stmt, OpenTxnRequest rqst)
+           throws SQLException, MetaException {
+     int numTxns = rqst.getNum_txns();
+     ResultSet rs = null;
+     TxnType txnType = TxnType.DEFAULT;
+     try {
+       if (rqst.isSetReplPolicy()) {
+         List<Long> targetTxnIdList = getTargetTxnIdList(rqst.getReplPolicy(), rqst.getReplSrcTxnIds(), stmt);
+ 
+         if (!targetTxnIdList.isEmpty()) {
+           if (targetTxnIdList.size() != rqst.getReplSrcTxnIds().size()) {
+             LOG.warn("target txn id number " + targetTxnIdList.toString() +
+                     " is not matching with source txn id number " + rqst.getReplSrcTxnIds().toString());
+           }
+           LOG.info("Target transactions " + targetTxnIdList.toString() + " are present for repl policy :" +
+                   rqst.getReplPolicy() + " and Source transaction id : " + rqst.getReplSrcTxnIds().toString());
+           return targetTxnIdList;
+         }
+         txnType = TxnType.REPL_CREATED;
+       }
+ 
+       String s = sqlGenerator.addForUpdateClause("select ntxn_next from NEXT_TXN_ID");
+       LOG.debug("Going to execute query <" + s + ">");
+       rs = stmt.executeQuery(s);
+       if (!rs.next()) {
+         throw new MetaException("Transaction database not properly " +
+                 "configured, can't find next transaction id.");
+       }
+       long first = rs.getLong(1);
+       s = "update NEXT_TXN_ID set ntxn_next = " + (first + numTxns);
+       LOG.debug("Going to execute update <" + s + ">");
+       stmt.executeUpdate(s);
+ 
+       long now = getDbTime(dbConn);
+       List<Long> txnIds = new ArrayList<>(numTxns);
+ 
+       List<String> rows = new ArrayList<>();
+       for (long i = first; i < first + numTxns; i++) {
+         txnIds.add(i);
+         rows.add(i + "," + quoteChar(TXN_OPEN) + "," + now + "," + now + ","
+                 + quoteString(rqst.getUser()) + "," + quoteString(rqst.getHostname()) + "," + txnType.getValue());
+       }
+       List<String> queries = sqlGenerator.createInsertValuesStmt(
+             "TXNS (txn_id, txn_state, txn_started, txn_last_heartbeat, txn_user, txn_host, txn_type)", rows);
+       for (String q : queries) {
+         LOG.debug("Going to execute update <" + q + ">");
+         stmt.execute(q);
+       }
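+       // For example (hypothetical values), with first = 501, numTxns = 2, user 'hive'
+       // and host 'host1', the generated statement is roughly:
+       //   insert into TXNS (txn_id, txn_state, txn_started, txn_last_heartbeat,
+       //     txn_user, txn_host, txn_type)
+       //   values (501,'o',<now>,<now>,'hive','host1',0),(502,'o',<now>,<now>,'hive','host1',0)
+       // (createInsertValuesStmt may split the rows across several statements,
+       // depending on the database product.)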
+ 
+       // Need to register minimum open txnid for current transactions into MIN_HISTORY table.
+       s = "select min(txn_id) from TXNS where txn_state = " + quoteChar(TXN_OPEN);
+       LOG.debug("Going to execute query <" + s + ">");
+       rs = stmt.executeQuery(s);
+       if (!rs.next()) {
+         throw new IllegalStateException("Scalar query returned no rows?!?!!");
+       }
+ 
+       // TXNS table should have at least one entry because we just inserted the newly opened txns.
+       // So, min(txn_id) would be a non-zero txnid.
+       long minOpenTxnId = rs.getLong(1);
+       assert (minOpenTxnId > 0);
+       rows.clear();
+       for (long txnId = first; txnId < first + numTxns; txnId++) {
+         rows.add(txnId + ", " + minOpenTxnId);
+       }
+ 
+       // Insert transaction entries into MIN_HISTORY_LEVEL.
+       List<String> inserts = sqlGenerator.createInsertValuesStmt(
+               "MIN_HISTORY_LEVEL (mhl_txnid, mhl_min_open_txnid)", rows);
+       for (String insert : inserts) {
+         LOG.debug("Going to execute insert <" + insert + ">");
+         stmt.execute(insert);
+       }
+       LOG.info("Added entries to MIN_HISTORY_LEVEL for current txns: (" + txnIds
+               + ") with min_open_txn: " + minOpenTxnId);
+ 
+       if (rqst.isSetReplPolicy()) {
+         List<String> rowsRepl = new ArrayList<>();
+ 
+         for (int i = 0; i < numTxns; i++) {
+           rowsRepl.add(
+                   quoteString(rqst.getReplPolicy()) + "," + rqst.getReplSrcTxnIds().get(i) + "," + txnIds.get(i));
+         }
+ 
+         List<String> queriesRepl = sqlGenerator.createInsertValuesStmt(
+                 "REPL_TXN_MAP (RTM_REPL_POLICY, RTM_SRC_TXN_ID, RTM_TARGET_TXN_ID)", rowsRepl);
+ 
+         for (String query : queriesRepl) {
+           LOG.info("Going to execute insert <" + query + ">");
+           stmt.execute(query);
+         }
+       }
+ 
+       if (transactionalListeners != null) {
+         MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                 EventMessage.EventType.OPEN_TXN, new OpenTxnEvent(txnIds, null), dbConn, sqlGenerator);
+       }
+       return txnIds;
+     } finally {
+       close(rs);
+     }
+   }
+ 
+   private List<Long> getTargetTxnIdList(String replPolicy, List<Long> sourceTxnIdList, Statement stmt)
+           throws SQLException {
+     ResultSet rs = null;
+     try {
+       List<String> inQueries = new ArrayList<>();
+       StringBuilder prefix = new StringBuilder();
+       StringBuilder suffix = new StringBuilder();
+       List<Long> targetTxnIdList = new ArrayList<>();
+       prefix.append("select RTM_TARGET_TXN_ID from REPL_TXN_MAP where ");
+       suffix.append(" and RTM_REPL_POLICY = " + quoteString(replPolicy));
+       TxnUtils.buildQueryWithINClause(conf, inQueries, prefix, suffix, sourceTxnIdList,
+               "RTM_SRC_TXN_ID", false, false);
+       for (String query : inQueries) {
+         LOG.debug("Going to execute select <" + query + ">");
+         rs = stmt.executeQuery(query);
+         while (rs.next()) {
+           targetTxnIdList.add(rs.getLong(1));
+         }
+       }
+       LOG.debug("targetTxnid for srcTxnId " + sourceTxnIdList.toString() + " is " + targetTxnIdList.toString());
+       return targetTxnIdList;
+     }  catch (SQLException e) {
+       LOG.warn("failed to get target txn ids " + e.getMessage());
+       throw e;
+     } finally {
+       close(rs);
+     }
+   }
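+   // For illustration, the batched queries built above have roughly this shape
+   // (hypothetical ids/policy; actual batching is decided by buildQueryWithINClause):
+   //
+   //   select RTM_TARGET_TXN_ID from REPL_TXN_MAP
+   //     where RTM_SRC_TXN_ID in (101, 102, 103) and RTM_REPL_POLICY = 'default.*'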
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public long getTargetTxnId(String replPolicy, long sourceTxnId) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         List<Long> targetTxnIds = getTargetTxnIdList(replPolicy, Collections.singletonList(sourceTxnId), stmt);
+         if (targetTxnIds.isEmpty()) {
+           LOG.info("Txn {} not present for repl policy {}", sourceTxnId, replPolicy);
+           return -1;
+         }
+         assert (targetTxnIds.size() == 1);
+         return targetTxnIds.get(0);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getTargetTxnId(" + replPolicy + sourceTxnId + ")");
+         throw new MetaException("Unable to get target transaction id "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return getTargetTxnId(replPolicy, sourceTxnId);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException {
+     long txnid = rqst.getTxnid();
+     long sourceTxnId = -1;
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         if (rqst.isSetReplPolicy()) {
+           sourceTxnId = rqst.getTxnid();
+           List<Long> targetTxnIds = getTargetTxnIdList(rqst.getReplPolicy(),
+                   Collections.singletonList(sourceTxnId), stmt);
+           if (targetTxnIds.isEmpty()) {
+             LOG.info("Target txn id is missing for source txn id : " + sourceTxnId +
+                     " and repl policy " + rqst.getReplPolicy());
+             return;
+           }
+           assert targetTxnIds.size() == 1;
+           txnid = targetTxnIds.get(0);
+         }
+ 
+         if (abortTxns(dbConn, Collections.singletonList(txnid), true) != 1) {
+           TxnStatus status = findTxnState(txnid,stmt);
+           if(status == TxnStatus.ABORTED) {
+             if (rqst.isSetReplPolicy()) {
+               // in case of replication, idempotency is taken care of by getTargetTxnId
+               LOG.warn("Invalid state ABORTED for transactions started using replication replay task");
+               String s = "delete from REPL_TXN_MAP where RTM_SRC_TXN_ID = " + sourceTxnId +
+                       " and RTM_REPL_POLICY = " + quoteString(rqst.getReplPolicy());
+               LOG.info("Going to execute  <" + s + ">");
+               stmt.executeUpdate(s);
+             }
+             LOG.info("abortTxn(" + JavaUtils.txnIdToString(txnid) +
+               ") requested by it is already " + TxnStatus.ABORTED);
+             return;
+           }
+           raiseTxnUnexpectedState(status, txnid);
+         }
+ 
+         if (rqst.isSetReplPolicy()) {
+           String s = "delete from REPL_TXN_MAP where RTM_SRC_TXN_ID = " + sourceTxnId +
+               " and RTM_REPL_POLICY = " + quoteString(rqst.getReplPolicy());
+           LOG.info("Going to execute  <" + s + ">");
+           stmt.executeUpdate(s);
+         }
+ 
+         if (transactionalListeners != null) {
+           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                   EventMessage.EventType.ABORT_TXN, new AbortTxnEvent(txnid, null), dbConn, sqlGenerator);
+         }
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "abortTxn(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       abortTxn(rqst);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.Idempotent
+   public void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException {
+     List<Long> txnids = rqst.getTxn_ids();
+     try {
+       Connection dbConn = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         int numAborted = abortTxns(dbConn, txnids, false);
+         if (numAborted != txnids.size()) {
+           LOG.warn("Abort Transactions command only aborted " + numAborted + " out of " +
+               txnids.size() + " transactions. It's possible that the other " +
+               (txnids.size() - numAborted) +
+               " transactions have been aborted or committed, or the transaction ids are invalid.");
+         }
+ 
+         for (Long txnId : txnids) {
+           if (transactionalListeners != null) {
+             MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                     EventMessage.EventType.ABORT_TXN, new AbortTxnEvent(txnId, null), dbConn, sqlGenerator);
+           }
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "abortTxns(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+             + StringUtils.stringifyException(e));
+       } finally {
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       abortTxns(rqst);
+     }
+   }
+ 
+   /**
+    * Concurrency/isolation notes:
+    * This is mutexed with {@link #openTxns(OpenTxnRequest)} and other {@link #commitTxn(CommitTxnRequest)}
+    * operations using select4update on NEXT_TXN_ID.  Also, mutexes on TXNS table for specific txnid:X
+    * see more notes below.
+    * In order to prevent lost updates, we need to determine if any 2 transactions overlap.  Each txn
+    * is viewed as an interval [M,N]. M is the txnid and N is taken from the same NEXT_TXN_ID sequence
+    * so that we can compare commit time of txn T with start time of txn S.  This sequence can be thought of
+    * as a logical time counter.  If S.commitTime < T.startTime, T and S do NOT overlap.
+    *
+    * Motivating example:
+    * Suppose we have multi-statement transactions T and S both of which are attempting x = x + 1
+    * In order to prevent the lost update problem, the non-overlapping txns must lock in the snapshot
+    * that they read appropriately.  In particular, if txns do not overlap, then one follows the other
+    * (assuming they write the same entity), and thus the 2nd must see changes of the 1st.  We ensure
+    * this by locking in snapshot after 
+    * {@link #openTxns(OpenTxnRequest)} call is made (see org.apache.hadoop.hive.ql.Driver.acquireLocksAndOpenTxn)
+    * and mutexing openTxn() with commit().  In other words, once a S.commit() starts we must ensure
+    * that txn T which will be considered a later txn, locks in a snapshot that includes the result
+    * of S's commit (assuming no other txns).
+    * As a counter example, suppose we have S[3,3] and T[4,4] (commitId=txnid means no other transactions
+    * were running in parallel).  If T and S both locked in the same snapshot (for example commit of
+    * txnid:2, which is possible if commitTxn() and openTxns() are not mutexed)
+    * 'x' would be updated to the same value by both, i.e. lost update. 
+    */
+   @Override
+   @RetrySemantics.Idempotent("No-op if already committed")
+   public void commitTxn(CommitTxnRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException, MetaException {
+     MaterializationsRebuildLockHandler materializationsRebuildLockHandler =
+         MaterializationsRebuildLockHandler.get();
+     List<TransactionRegistryInfo> txnComponents = new ArrayList<>();
+     boolean isUpdateDelete = false;
+     long txnid = rqst.getTxnid();
+     long sourceTxnId = -1;
+ 
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet lockHandle = null;
+       ResultSet commitIdRs = null, rs;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         if (rqst.isSetReplPolicy()) {
+           sourceTxnId = rqst.getTxnid();
+           List<Long> targetTxnIds = getTargetTxnIdList(rqst.getReplPolicy(),
+                   Collections.singletonList(sourceTxnId), stmt);
+           if (targetTxnIds.isEmpty()) {
+             LOG.info("Target txn id is missing for source txn id : " + sourceTxnId +
+                     " and repl policy " + rqst.getReplPolicy());
+             return;
+           }
+           assert targetTxnIds.size() == 1;
+           txnid = targetTxnIds.get(0);
+         }
+ 
+         /**
+          * Runs at READ_COMMITTED with S4U on the TXNS row for "txnid".  S4U ensures that no other
+          * operation can change this txn (such as acquiring locks). While lock() and commitTxn()
+          * should not normally run concurrently (for the same txn), they could due to bugs in the
+          * client, which could then corrupt internal transaction manager state.  Also competes with abortTxn().
+          */
+         lockHandle = lockTransactionRecord(stmt, txnid, TXN_OPEN);
+         if (lockHandle == null) {
+           //if here, txn was not found (in expected state)
+           TxnStatus actualTxnStatus = findTxnState(txnid, stmt);
+           if(actualTxnStatus == TxnStatus.COMMITTED) {
+             if (rqst.isSetReplPolicy()) {
+               // in case of replication, idempotency is taken care of by getTargetTxnId
+               LOG.warn("Invalid state COMMITTED for transactions started using replication replay task");
+             }
+             /**
+              * This makes the operation idempotent
+              * (assume that this is most likely due to retry logic)
+              */
+             LOG.info("Nth commitTxn(" + JavaUtils.txnIdToString(txnid) + ") msg");
+             return;
+           }
+           raiseTxnUnexpectedState(actualTxnStatus, txnid);
+           shouldNeverHappen(txnid);
+           //dbConn is rolled back in finally{}
+         }
+ 
+         String conflictSQLSuffix = null;
+         if (rqst.isSetReplPolicy()) {
+           rs = null;
+         } else {
+           conflictSQLSuffix = "from TXN_COMPONENTS where tc_txnid=" + txnid + " and tc_operation_type IN(" +
+                   quoteChar(OpertaionType.UPDATE.sqlConst) + "," + quoteChar(OpertaionType.DELETE.sqlConst) + ")";
+           rs = stmt.executeQuery(sqlGenerator.addLimitClause(1,
+                   "tc_operation_type " + conflictSQLSuffix));
+         }
+         if (rs != null && rs.next()) {
+           isUpdateDelete = true;
+           close(rs);
+           //if here it means currently committing txn performed update/delete and we should check WW conflict
+           /**
+            * This S4U will mutex with other commitTxn() and openTxns(). 
+            * -1 below makes txn intervals look like [3,3] [4,4] if all txns are serial
+            * Note: it's possible to have several txns have the same commit id.  Suppose 3 txns start
+            * at the same time and no new txns start until all 3 commit.
+            * We could've incremented the sequence for commitId as well, but it doesn't add anything functionally.
+            */
+           commitIdRs = stmt.executeQuery(sqlGenerator.addForUpdateClause("select ntxn_next - 1 from NEXT_TXN_ID"));
+           if (!commitIdRs.next()) {
+             throw new IllegalStateException("No rows found in NEXT_TXN_ID");
+           }
+           long commitId = commitIdRs.getLong(1);
+           Savepoint undoWriteSetForCurrentTxn = dbConn.setSavepoint();
+           /**
+            * "select distinct" is used below because
+            * 1. once we get to multi-statement txns, we only care to record that something was updated once
+            * 2. if {@link #addDynamicPartitions(AddDynamicPartitions)} is retried by caller it may create
+            *  duplicate entries in TXN_COMPONENTS
+            * but we want to add a PK on WRITE_SET which won't have unique rows w/o this distinct
+            * even if it includes all of its columns
+            */
+           int numCompsWritten = stmt.executeUpdate(
+             "insert into WRITE_SET (ws_database, ws_table, ws_partition, ws_txnid, ws_commit_id, ws_operation_type)" +
+             " select distinct tc_database, tc_table, tc_partition, tc_txnid, " + commitId + ", tc_operation_type " + conflictSQLSuffix);
+           /**
+            * see if any overlapping txns wrote the same element, i.e. have a conflict
+            * Since entire commit operation is mutexed wrt other start/commit ops,
+            * committed.ws_commit_id <= current.ws_commit_id for all txns
+            * thus if committed.ws_commit_id < current.ws_txnid, transactions do NOT overlap
+            * For example, [17,20] is committed, [6,80] is being committed right now - these overlap
+            * [17,20] committed and [21,21] committing now - these do not overlap.
+            * [17,18] committed and [18,19] committing now - these overlap  (here 18 started while 17 was still running)
+            */
+           rs = stmt.executeQuery
+             (sqlGenerator.addLimitClause(1, "committed.ws_txnid, committed.ws_commit_id, committed.ws_database," +
+               "committed.ws_table, committed.ws_partition, cur.ws_commit_id cur_ws_commit_id, " +
+               "cur.ws_operation_type cur_op, committed.ws_operation_type committed_op " +
+               "from WRITE_SET committed INNER JOIN WRITE_SET cur " +
+               "ON committed.ws_database=cur.ws_database and committed.ws_table=cur.ws_table " +
+               //For partitioned table we always track writes at partition level (never at table)
+               //and for non partitioned - always at table level, thus the same table should never
+               //have entries with partition key and w/o
+               "and (committed.ws_partition=cur.ws_partition or (committed.ws_partition is null and cur.ws_partition is null)) " +
+               "where cur.ws_txnid <= committed.ws_commit_id" + //txns overlap; could replace ws_txnid
+               // with txnid, though any decent DB should infer this
+               " and cur.ws_txnid=" + txnid + //make sure RHS of join only has rows we just inserted as
+               // part of this commitTxn() op
+               " and committed.ws_txnid <> " + txnid + //and LHS only has committed txns
+               //U+U and U+D is a conflict but D+D is not and we don't currently track I in WRITE_SET at all
+               " and (committed.ws_operation_type=" + quoteChar(OpertaionType.UPDATE.sqlConst) +
+               " OR cur.ws_operation_type=" + quoteChar(OpertaionType.UPDATE.sqlConst) + ")"));
+           if (rs.next()) {
+             //found a conflict
+             String committedTxn = "[" + JavaUtils.txnIdToString(rs.getLong(1)) + "," + rs.getLong(2) + "]";
+             StringBuilder resource = new StringBuilder(rs.getString(3)).append("/").append(rs.getString(4));
+             String partitionName = rs.getString(5);
+             if (partitionName != null) {
+               resource.append('/').append(partitionName);
+             }
+             String msg = "Aborting [" + JavaUtils.txnIdToString(txnid) + "," + rs.getLong(6) + "]" + " due to a write conflict on " + resource +
+               " committed by " + committedTxn + " " + rs.getString(7) + "/" + rs.getString(8);
+             close(rs);
+             //remove WRITE_SET info for current txn since it's about to abort
+             dbConn.rollback(undoWriteSetForCurrentTxn);
+             LOG.info(msg);
+             //todo: should make abortTxns() write something into TXNS.TXN_META_INFO about this
+             if (abortTxns(dbConn, Collections.singletonList(txnid), true) != 1) {
+               throw new IllegalStateException(msg + " FAILED!");
+             }
+             dbConn.commit();
+             close(null, stmt, dbConn);
+             throw new TxnAbortedException(msg);
+           } else {
+             //no conflicting operations, proceed with the rest of commit sequence
+           }
+         }
+         else {
+           /**
+            * current txn didn't update/delete anything (may have inserted), so just proceed with commit
+            *
+            * We only care about commit id for write txns, so for RO (when supported) txns we don't
+            * have to mutex on NEXT_TXN_ID.
+            * Consider: if RO txn is after a W txn, then RO's openTxns() will be mutexed with W's
+            * commitTxn() because both do S4U on NEXT_TXN_ID and thus RO will see result of W txn.
+            * If RO < W, then there is no reads-from relationship.
+            * In replication flow we don't expect any write-write conflict as it should have been handled at source.
+            */
+         }
+ 
+         String s;
+         if (!rqst.isSetReplPolicy()) {
+           // Move the record from txn_components into completed_txn_components so that the compactor
+           // knows where to look to compact.
+           s = "insert into COMPLETED_TXN_COMPONENTS (ctc_txnid, ctc_database, " +
+                   "ctc_table, ctc_partition, ctc_writeid) select tc_txnid, tc_database, tc_table, " +
+                   "tc_partition, tc_writeid from TXN_COMPONENTS where tc_txnid = " + txnid;
+           LOG.debug("Going to execute insert <" + s + ">");
+ 
+           if ((stmt.executeUpdate(s)) < 1) {
+             //this can be reasonable for an empty txn START/COMMIT or read-only txn
+             //also an IUD with DP that didn't match any rows.
+             LOG.info("Expected to move at least one record from txn_components to " +
+                     "completed_txn_components when committing txn! " + JavaUtils.txnIdToString(txnid));
+           }
+         } else {
+           if (rqst.isSetWriteEventInfos()) {
+             List<String> rows = new ArrayList<>();
+             for (WriteEventInfo writeEventInfo : rqst.getWriteEventInfos()) {
+               rows.add(txnid + "," + quoteString(writeEventInfo.getDatabase()) + "," +
+                       quoteString(writeEventInfo.getTable()) + "," +
+                       quoteString(writeEventInfo.getPartition()) + "," +
+                       writeEventInfo.getWriteId());
+             }
+             List<String> queries = sqlGenerator.createInsertValuesStmt("COMPLETED_TXN_COMPONENTS " +
+                     "(ctc_txnid," + " ctc_database, ctc_table, ctc_partition, ctc_writeid)", rows);
+             for (String q : queries) {
+               LOG.debug("Going to execute insert  <" + q + "> ");
+               stmt.execute(q);
+             }
+           }
+ 
+           s = "delete from REPL_TXN_MAP where RTM_SRC_TXN_ID = " + sourceTxnId +
+                   " and RTM_REPL_POLICY = " + quoteString(rqst.getReplPolicy());
+           LOG.info("Repl going to execute  <" + s + ">");
+           stmt.executeUpdate(s);
+         }
+ 
+         // Obtain information that we need to update registry
+         s = "select ctc_database, ctc_table, ctc_writeid, ctc_timestamp from COMPLETED_TXN_COMPONENTS" +
+                 " where ctc_txnid = " + txnid;
+ 
+         LOG.debug("Going to extract table modification information for invalidation cache <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           // We only enter this loop if the transaction actually affected any table
+           txnComponents.add(new TransactionRegistryInfo(rs.getString(1), rs.getString(2),
+               rs.getLong(3), rs.getTimestamp(4, Calendar.getInstance(TimeZone.getTimeZone("UTC"))).getTime()));
+         }
+ 
+         // cleanup all txn related metadata
+         s = "delete from TXN_COMPONENTS where tc_txnid = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         s = "delete from HIVE_LOCKS where hl_txnid = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         s = "delete from TXNS where txn_id = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         s = "delete from MIN_HISTORY_LEVEL where mhl_txnid = " + txnid;
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+         LOG.info("Removed committed transaction: (" + txnid + ") from MIN_HISTORY_LEVEL");
+         if (transactionalListeners != null) {
+           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                   EventMessage.EventType.COMMIT_TXN, new CommitTxnEvent(txnid, null), dbConn, sqlGenerator);
+         }
+ 
+         MaterializationsInvalidationCache materializationsInvalidationCache =
+             MaterializationsInvalidationCache.get();
+         for (TransactionRegistryInfo info : txnComponents) {
+           if (materializationsInvalidationCache.containsMaterialization(info.dbName, info.tblName) &&
+               !materializationsRebuildLockHandler.readyToCommitResource(info.dbName, info.tblName, txnid)) {
+             throw new MetaException(
+                 "Another process is rebuilding the materialized view " + info.fullyQualifiedName);
+           }
+         }
+         LOG.debug("Going to commit");
+         close(rs);
+         dbConn.commit();
+ 
+         // Update registry with modifications
+         for (TransactionRegistryInfo info : txnComponents) {
+           materializationsInvalidationCache.notifyTableModification(
+               info.dbName, info.tblName, info.writeId, info.timestamp, isUpdateDelete);
+         }
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "commitTxn(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+           + StringUtils.stringifyException(e));
+       } finally {
+         close(commitIdRs);
+         close(lockHandle, stmt, dbConn);
+         unlockInternal();
+         for (TransactionRegistryInfo info : txnComponents) {
+           materializationsRebuildLockHandler.unlockResource(info.dbName, info.tblName, txnid);
+         }
+       }
+     } catch (RetryException e) {
+       commitTxn(rqst);
+     }
+   }
+ 
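
The WRITE_SET self-join in commitTxn() boils down to a small predicate. Given the invariant stated in the comments (commit operations are mutexed, so every committed commit id is <= the current one), two txns overlap iff cur.txnid <= committed.commitId, and an overlapping pair conflicts only if both wrote the same entity and at least one side was an update (delete+delete is fine). A plain-Java sketch of that predicate, with illustrative names and 'u'/'d' standing in for the operation-type constants:

    public class WriteSetConflictSketch {
      static final class Entry {
        final long txnId, commitId;
        final String db, table, partition; // partition is null for unpartitioned tables
        final char op;                     // 'u' = update, 'd' = delete
        Entry(long txnId, long commitId, String db, String table, String partition, char op) {
          this.txnId = txnId; this.commitId = commitId;
          this.db = db; this.table = table; this.partition = partition; this.op = op;
        }
      }

      static boolean sameEntity(Entry a, Entry b) {
        return a.db.equals(b.db) && a.table.equals(b.table)
            && (a.partition == null ? b.partition == null : a.partition.equals(b.partition));
      }

      // Conflict: the txns overlapped in time, wrote the same entity, and at least
      // one write was an update (delete+delete is not a lost update).
      static boolean conflicts(Entry committed, Entry cur) {
        boolean overlap = cur.txnId <= committed.commitId;
        return overlap && sameEntity(committed, cur) && (committed.op == 'u' || cur.op == 'u');
      }

      public static void main(String[] args) {
        Entry committed = new Entry(17, 20, "db", "t", null, 'u');
        Entry cur = new Entry(18, 21, "db", "t", null, 'd');   // [17,20] vs [18,21] overlap
        Entry later = new Entry(21, 21, "db", "t", null, 'u'); // [21,21] starts after 20
        System.out.println(conflicts(committed, cur));   // true: overlap + update involved
        System.out.println(conflicts(committed, later)); // false: no overlap
      }
    }
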
+   /**
+    * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark.
+    * @param rqst info on table/partitions and writeid snapshot to replicate.
+    * @throws MetaException
+    */
+   @Override
+   @RetrySemantics.Idempotent("No-op if already replicated the writeid state")
+   public void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException {
+     String dbName = rqst.getDbName().toLowerCase();
+     String tblName = rqst.getTableName().toLowerCase();
+     ValidWriteIdList validWriteIdList = new ValidReaderWriteIdList(rqst.getValidWriteIdlist());
+ 
+     // Get the abortedWriteIds which are already sorted in ascending order.
+     List<Long> abortedWriteIds = getAbortedWriteIds(validWriteIdList);
+     int numAbortedWrites = abortedWriteIds.size();
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       TxnStore.MutexAPI.LockHandle handle = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         // Check if this txn state is already replicated for this given table. If yes, then it is
+         // idempotent case and just return.
+         String sql = "select nwi_next from NEXT_WRITE_ID where nwi_database = " + quoteString(dbName)
+                         + " and nwi_table = " + quoteString(tblName);
+         LOG.debug("Going to execute query <" + sql + ">");
+ 
+         rs = stmt.executeQuery(sql);
+         if (rs.next()) {
+           LOG.info("Idempotent flow: WriteId state <" + validWriteIdList + "> is already applied for the table: "
+                   + dbName + "." + tblName);
+           rollbackDBConn(dbConn);
+           return;
+         }
+ 
+         if (numAbortedWrites > 0) {
+           // Allocate/Map one txn per aborted writeId and abort the txn to mark writeid as aborted.
+           List<Long> txnIds = openTxns(dbConn, stmt,
+                   new OpenTxnRequest(numAbortedWrites, rqst.getUser(), rqst.getHostName()));
+           assert(numAbortedWrites == txnIds.size());
+ 
+           // Map each aborted write id with each allocated txn.
+           List<String> rows = new ArrayList<>();
+           int i = 0;
+           for (long txn : txnIds) {
+             long writeId = abortedWriteIds.get(i++);
+             rows.add(txn + ", " + quoteString(dbName) + ", " + quoteString(tblName) + ", " + writeId);
+             LOG.info("Allocated writeID: " + writeId + " for txnId: " + txn);
+           }
+ 
+           // Insert entries to TXN_TO_WRITE_ID for aborted write ids
+           List<String> inserts = sqlGenerator.createInsertValuesStmt(
+                   "TXN_TO_WRITE_ID (t2w_txnid, t2w_database, t2w_table, t2w_writeid)", rows);
+           for (String insert : inserts) {
+             LOG.debug("Going to execute insert <" + insert + ">");
+             stmt.execute(insert);
+           }
+ 
+           // Abort all the allocated txns so that the mapped write ids are referred as aborted ones.
+           int numAborts = abortTxns(dbConn, txnIds, true);
+           assert(numAborts == numAbortedWrites);
+         }
+         handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name());
+ 
+         // There may be txns in the list which have no write id allocated, so go ahead and allocate them.
+         // Get the next write id for the given table and update it with the new next write id.
+         // It is expected that NEXT_WRITE_ID doesn't have an entry for this table, hence directly insert it.
+         long nextWriteId = validWriteIdList.getHighWatermark() + 1;
+ 
+         // First allocation of write id (hwm+1) should add the table to the next_write_id meta table.
+         sql = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values ("
+                 + quoteString(dbName) + "," + quoteString(tblName) + ","
+                 + Long.toString(nextWriteId) + ")";
+         LOG.debug("Going to execute insert <" + sql + ">");
+         stmt.execute(sql);
+ 
+         LOG.info("WriteId state <" + validWriteIdList + "> is applied for the table: " + dbName + "." + tblName);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "replTableWriteIdState(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         if(handle != null) {
+           handle.releaseLocks();
+         }
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       replTableWriteIdState(rqst);
+     }
+ 
+     // Schedule Major compaction on all the partitions/table to clean aborted data
+     if (numAbortedWrites > 0) {
+       CompactionRequest compactRqst = new CompactionRequest(rqst.getDbName(), rqst.getTableName(),
+               CompactionType.MAJOR);
+       if (rqst.isSetPartNames()) {
+         for (String partName : rqst.getPartNames()) {
+           compactRqst.setPartitionname(partName);
+           compact(compactRqst);
+         }
+       } else {
+         compact(compactRqst);
+       }
+     }
+   }
+ 
+   private List<Long> getAbortedWriteIds(ValidWriteIdList validWriteIdList) {
+     List<Long> abortedWriteIds = new ArrayList<>();
+     for (long writeId : validWriteIdList.getInvalidWriteIds()) {
+       if (validWriteIdList.isWriteIdAborted(writeId)) {
+         abortedWriteIds.add(writeId);
+       }
+     }
+     return abortedWriteIds;
+   }
+ 
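
getAbortedWriteIds() above is a simple order-preserving filter: the invalid write ids (open plus aborted) are narrowed to the aborted ones, and the ascending input order is what lets callers pair them positionally later. A stand-alone sketch with a plain set-membership test standing in for ValidWriteIdList.isWriteIdAborted():

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.Set;

    public class AbortedWriteIdFilterSketch {
      // Keep only the aborted ids; input is ascending, and order is preserved.
      static List<Long> abortedOnly(long[] invalidWriteIds, Set<Long> abortedSet) {
        List<Long> aborted = new ArrayList<>();
        for (long writeId : invalidWriteIds) {
          if (abortedSet.contains(writeId)) {
            aborted.add(writeId);
          }
        }
        return aborted;
      }

      public static void main(String[] args) {
        long[] invalid = {3L, 5L, 8L};                            // open or aborted, sorted
        Set<Long> aborted = new HashSet<>(Arrays.asList(3L, 8L)); // 5 is still open
        System.out.println(abortedOnly(invalid, aborted));        // [3, 8]
      }
    }
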
+   @Override
+   @RetrySemantics.ReadOnly
+   public GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst)
+           throws NoSuchTxnException, MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ValidTxnList validTxnList;
+ 
+       // We should prepare the valid write ids list based on validTxnList of current txn.
+       // If no txn exists in the caller, then they would pass null for validTxnList and so it is
+       // required to get the current state of txns to make validTxnList
+       if (rqst.isSetValidTxnList()) {
+         validTxnList = new ValidReadTxnList(rqst.getValidTxnList());
+       } else {
+         // Passing 0 for currentTxn means this validTxnList is not wrt any txn
+         validTxnList = TxnUtils.createValidReadTxnList(getOpenTxns(), 0);
+       }
+       try {
+         /**
+          * This runs at READ_COMMITTED for exactly the same reason as {@link #getOpenTxnsInfo()}
+          */
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         // Get the valid write id list for all the tables read by the current txn
+         List<TableValidWriteIds> tblValidWriteIdsList = new ArrayList<>();
+         for (String fullTableName : rqst.getFullTableNames()) {
+           tblValidWriteIdsList.add(getValidWriteIdsForTable(stmt, fullTableName, validTxnList));
+         }
+ 
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         GetValidWriteIdsResponse owr = new GetValidWriteIdsResponse(tblValidWriteIdsList);
+         return owr;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "getValidWriteIds");
+         throw new MetaException("Unable to select from transaction database, "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return getValidWriteIds(rqst);
+     }
+   }
+ 
+   // Method to get the Valid write ids list for the given table
+   // Input fullTableName is expected to be of format <db_name>.<table_name>
+   private TableValidWriteIds getValidWriteIdsForTable(Statement stmt, String fullTableName,
+                                                ValidTxnList validTxnList) throws SQLException {
+     ResultSet rs = null;
+     String[] names = TxnUtils.getDbTableName(fullTableName);
+     try {
+       // Need to initialize to 0 to make sure if nobody modified this table, then current txn
+       // shouldn't read any data.
+       // If there is a conversion from non-acid to acid table, then by default 0 would be assigned as
+       // writeId for data from non-acid table and so writeIdHwm=0 would ensure those data are readable by any txns.
+       long writeIdHwm = 0;
+       List<Long> invalidWriteIdList = new ArrayList<>();
+       long minOpenWriteId = Long.MAX_VALUE;
+       BitSet abortedBits = new BitSet();
+       long txnHwm = validTxnList.getHighWatermark();
+ 
+       // Find the writeId high water mark based upon the txnId high water mark. If found, we need to
+       // traverse all write ids less than the writeId HWM to build the exceptions list.
+       // The writeHWM = min(NEXT_WRITE_ID.nwi_next-1, max(TXN_TO_WRITE_ID.t2w_writeid under txnHwm))
+       String s = "select max(t2w_writeid) from TXN_TO_WRITE_ID where t2w_txnid <= " + txnHwm
+               + " and t2w_database = " + quoteString(names[0])
+               + " and t2w_table = " + quoteString(names[1]);
+       LOG.debug("Going to execute query<" + s + ">");
+       rs = stmt.executeQuery(s);
+       if (rs.next()) {
+         writeIdHwm = rs.getLong(1);
+       }
+ 
+       // If no writeIds allocated by txns under txnHwm, then find writeHwm from NEXT_WRITE_ID.
+       if (writeIdHwm <= 0) {
+         // Need to subtract 1 as nwi_next would be the next write id to be allocated, but we need the
+         // highest allocated write id.
+         s = "select nwi_next-1 from NEXT_WRITE_ID where nwi_database = " + quoteString(names[0])
+                 + " and nwi_table = " + quoteString(names[1]);
+         LOG.debug("Going to execute query<" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (rs.next()) {
+           long maxWriteId = rs.getLong(1);
+           if (maxWriteId > 0) {
+             writeIdHwm = (writeIdHwm > 0) ? Math.min(maxWriteId, writeIdHwm) : maxWriteId;
+           }
+         }
+       }
+ 
+       // As writeIdHwm is known, query all writeIds under the writeId HWM.
+       // If any writeId under HWM is allocated by txn > txnId HWM or belongs to open/aborted txns,
+       // then it will be added to the invalid list. The results should be sorted in ascending order based
+       // on write id. The sorting is needed as exceptions list in ValidWriteIdList would be looked-up
+       // using binary search.
+       s = "select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where t2w_writeid <= " + writeIdHwm
+               + " and t2w_database = " + quoteString(names[0])
+               + " and t2w_table = " + quoteString(names[1])
+               + " order by t2w_writeid asc";
+ 
+       LOG.debug("Going to execute query<" + s + ">");
+       rs = stmt.executeQuery(s);
+       while (rs.next()) {
+         long txnId = rs.getLong(1);
+         long writeId = rs.getLong(2);
+         if (validTxnList.isTxnValid(txnId)) {
+           // Skip if the transaction under evaluation is already committed.
+           continue;
+         }
+ 
+         // The current txn is either in open or aborted state.
+         // Mark the write ids state as per the txn state.
+         invalidWriteIdList.add(writeId);
+         if (validTxnList.isTxnAborted(txnId)) {
+           abortedBits.set(invalidWriteIdList.size() - 1);
+         } else {
+           minOpenWriteId = Math.min(minOpenWriteId, writeId);
+         }
+       }
+ 
+       ByteBuffer byteBuffer = ByteBuffer.wrap(abortedBits.toByteArray());
+       TableValidWriteIds owi = new TableValidWriteIds(fullTableName, writeIdHwm, invalidWriteIdList, byteBuffer);
+       if (minOpenWriteId < Long.MAX_VALUE) {
+         owi.setMinOpenWriteId(minOpenWriteId);
+       }
+       return owi;
+     } finally {
+       close(rs);
+     }
+   }
+ 
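
The ascending sort that getValidWriteIdsForTable() insists on matters because the exceptions list in a ValidWriteIdList is probed with binary search, and the aborted BitSet is indexed by a write id's position in that sorted list. A small sketch of that lookup scheme (names are illustrative, not the Hive API):

    import java.util.Arrays;
    import java.util.BitSet;

    public class WriteIdExceptionsSketch {
      // True iff writeId is in the sorted exceptions list and flagged aborted;
      // the binary search is what makes the sorted order a hard requirement.
      static boolean isAborted(long[] sortedInvalid, BitSet abortedBits, long writeId) {
        int idx = Arrays.binarySearch(sortedInvalid, writeId);
        return idx >= 0 && abortedBits.get(idx);
      }

      public static void main(String[] args) {
        long[] invalid = {3L, 5L, 8L}; // write ids under the HWM that aren't committed
        BitSet aborted = new BitSet();
        aborted.set(0);                // write id 3 aborted
        aborted.set(2);                // write id 8 aborted; 5 is open
        System.out.println(isAborted(invalid, aborted, 8L)); // true
        System.out.println(isAborted(invalid, aborted, 5L)); // false (open, not aborted)
        System.out.println(isAborted(invalid, aborted, 4L)); // false (valid/committed)
      }
    }
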
+   @Override
+   @RetrySemantics.Idempotent
+   public AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst)
+           throws NoSuchTxnException, TxnAbortedException, MetaException {
+     List<Long> txnIds;
+     String dbName = rqst.getDbName().toLowerCase();
+     String tblName = rqst.getTableName().toLowerCase();
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       TxnStore.MutexAPI.LockHandle handle = null;
+       List<TxnToWriteId> txnToWriteIds = new ArrayList<>();
+       List<TxnToWriteId> srcTxnToWriteIds = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         if (rqst.isSetReplPolicy()) {
+           srcTxnToWriteIds = rqst.getSrcTxnToWriteIdList();
+           List<Long> srcTxnIds = new ArrayList<>();
+           assert (rqst.isSetSrcTxnToWriteIdList());
+           assert (!rqst.isSetTxnIds());
+           assert (!srcTxnToWriteIds.isEmpty());
+ 
+           for (TxnToWriteId txnToWriteId :  srcTxnToWriteIds) {
+             srcTxnIds.add(txnToWriteId.getTxnId());
+           }
+           txnIds = getTargetTxnIdList(rqst.getReplPolicy(), srcTxnIds, stmt);
+           if (srcTxnIds.size() != txnIds.size()) {
+             LOG.warn("Target txn id is missing for source txn id : " + srcTxnIds.toString() +
+                     " and repl policy " + rqst.getReplPolicy());
+             throw new RuntimeException("This should never happen for txnIds: " + txnIds);
+           }
+         } else {
+           assert (!rqst.isSetSrcTxnToWriteIdList());
+           assert (rqst.isSetTxnIds());
+           txnIds = rqst.getTxnIds();
+         }
+ 
+         Collections.sort(txnIds); //easier to read logs and matches the assumption made in the replication flow
+ 
+         // Check if all the input txns are in open state. Write ID should be allocated only for open transactions.
+         if (!isTxnsInOpenState(txnIds, stmt)) {
+           ensureAllTxnsValid(dbName, tblName, txnIds, stmt);
+           throw new RuntimeException("This should never happen for txnIds: " + txnIds);
+         }
+ 
+         long writeId;
+         String s;
+         long allocatedTxnsCount = 0;
+         long txnId;
+         List<String> queries = new ArrayList<>();
+         StringBuilder prefix = new StringBuilder();
+         StringBuilder suffix = new StringBuilder();
+ 
+         // Traverse TXN_TO_WRITE_ID to see if any of the input txns have already been allocated a
+         // write id for the same db.table. If so, reuse it; otherwise a new one has to be allocated.
+         // The write id would have been already allocated in case of multi-statement txns, where the
+         // first write on a table allocates a write id and the rest of the writes re-use it.
+         prefix.append("select t2w_txnid, t2w_writeid from TXN_TO_WRITE_ID where"
+                         + " t2w_database = " + quoteString(dbName)
+                         + " and t2w_table = " + quoteString(tblName) + " and ");
+         suffix.append("");
+         TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix,
+                 txnIds, "t2w_txnid", false, false);
+         for (String query : queries) {
+           LOG.debug("Going to execute query <" + query + ">");
+           rs = stmt.executeQuery(query);
+           while (rs.next()) {
+             // If table write ID is already allocated for the given transaction, then just use it
+             txnId = rs.getLong(1);
+             writeId = rs.getLong(2);
+             txnToWriteIds.add(new TxnToWriteId(txnId, writeId));
+             allocatedTxnsCount++;
+             LOG.info("Reused already allocated writeID: " + writeId + " for txnId: " + txnId);
+           }
+         }
+ 
+         // Batch allocation should always happen atomically. Either write ids for all txns are allocated or none.
+         long numOfWriteIds = txnIds.size();
+         assert ((allocatedTxnsCount == 0) || (numOfWriteIds == allocatedTxnsCount));
+         if (allocatedTxnsCount == numOfWriteIds) {
+           // If all the txns in the list have pre-allocated write ids for the given table, then just return.
+           // This is for idempotent case.
+           return new AllocateTableWriteIdsResponse(txnToWriteIds);
+         }
+ 
+         handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name());
+ 
+         // There are some txns in the list which do not have a write id allocated, so go ahead and do it.
+         // Get the next write id for the given table and update it with the new next write id.
+         // This is a select-for-update query which takes a lock if the table entry is already there in NEXT_WRITE_ID.
+         s = sqlGenerator.addForUpdateClause(
+                 "select nwi_next from NEXT_WRITE_ID where nwi_database = " + quoteString(dbName)
+                         + " and nwi_table = " + quoteString(tblName));
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           // First allocation of write id should add the table to the next_write_id meta table.
+           // The initial value for write id should be 1, hence we add 1 to the number of write ids allocated here.
+           writeId = 1;
+           s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values ("
+                   + quoteString(dbName) + "," + quoteString(tblName) + "," + Long.toString(numOfWriteIds + 1) + ")";
+           LOG.debug("Going to execute insert <" + s + ">");
+           stmt.execute(s);
+         } else {
+           writeId = rs.getLong(1);
+           // Update the NEXT_WRITE_ID for the given table after incrementing by number of write ids allocated
+           s = "update NEXT_WRITE_ID set nwi_next = " + (writeId + numOfWriteIds)
+                   + " where nwi_database = " + quoteString(dbName)
+                   + " and nwi_table = " + quoteString(tblName);
+           LOG.debug("Going to execute update <" + s + ">");
+           stmt.executeUpdate(s);
+         }
+ 
+         // Map the newly allocated write ids against the list of txns which don't have pre-allocated
+         // write ids
+         List<String> rows = new ArrayList<>();
+         for (long txn : txnIds) {
+           rows.add(txn + ", " + quoteString(dbName) + ", " + quoteString(tblName) + ", " + writeId);
+           txnToWriteIds.add(new TxnToWriteId(txn, writeId));
+           LOG.info("Allocated writeID: " + writeId + " for txnId: " + txn);
+           writeId++;
+         }
+ 
+         if (rqst.isSetReplPolicy()) {
+           int lastIdx = txnToWriteIds.size()-1;
+           if ((txnToWriteIds.get(0).getWriteId() != srcTxnToWriteIds.get(0).getWriteId()) ||
+               (txnToWriteIds.get(lastIdx).getWriteId() != srcTxnToWriteIds.get(lastIdx).getWriteId())) {
+             LOG.error("Allocated write id range {} is not matching with the input write id range {}.",
+                     txnToWriteIds, srcTxnToWriteIds);
+             throw new IllegalStateException("Write id allocation failed for: " + srcTxnToWriteIds);
+           }
+         }
+ 
+         // Insert entries to TXN_TO_WRITE_ID for newly allocated write ids
+         List<String> inserts = sqlGenerator.createInsertValuesStmt(
+                 "TXN_TO_WRITE_ID (t2w_txnid, t2w_database, t2w_table, t2w_writeid)", rows);
+         for (String insert : inserts) {
+           LOG.debug("Going to execute insert <" + insert + ">");
+           stmt.execute(insert);
+         }
+ 
+         if (transactionalListeners != null) {
+           MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                   EventMessage.EventType.ALLOC_WRITE_ID,
+                   new AllocWriteIdEvent(txnToWriteIds, rqst.getDbName(), rqst.getTableName(), null),
+                   dbConn, sqlGenerator);
+         }
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return new AllocateTableWriteIdsResponse(txnToWriteIds);
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "allocateTableWriteIds(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+                 + StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         if(handle != null) {
+           handle.releaseLocks();
+         }
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       return allocateTableWriteIds(rqst);
+     }
+   }
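
The NEXT_WRITE_ID select-for-update plus update pair in allocateTableWriteIds() amounts to reserving a contiguous block of n write ids and advancing the counter by n in one step. A minimal in-memory sketch of that reservation, with synchronized standing in for the row lock and illustrative names throughout:

    import java.util.Arrays;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    public class WriteIdBlockAllocatorSketch {
      private long next = 1; // NEXT_WRITE_ID.nwi_next equivalent; write ids start at 1

      // Reserve [next, next + txnIds.size()) and hand out one id per txn, in txn order.
      synchronized Map<Long, Long> allocate(List<Long> sortedTxnIds) {
        Map<Long, Long> txnToWriteId = new LinkedHashMap<>();
        long writeId = next;
        next += sortedTxnIds.size(); // "update NEXT_WRITE_ID set nwi_next = ..."
        for (long txn : sortedTxnIds) {
          txnToWriteId.put(txn, writeId++);
        }
        return txnToWriteId;
      }

      public static void main(String[] args) {
        WriteIdBlockAllocatorSketch alloc = new WriteIdBlockAllocatorSketch();
        System.out.println(alloc.allocate(Arrays.asList(10L, 11L, 12L))); // {10=1, 11=2, 12=3}
        System.out.println(alloc.allocate(Arrays.asList(13L)));           // {13=4}
      }
    }
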
+   @Override
+   public void seedWriteIdOnAcidConversion(InitializeTableWriteIdsRequest rqst)
+       throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       TxnStore.MutexAPI.LockHandle handle = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         handle = getMutexAPI().acquireLock(MUTEX_KEY.WriteIdAllocator.name());
+         //since this is on conversion from non-acid to acid, NEXT_WRITE_ID should not have an entry
+         //for this table.  It also has a unique index in case 'should not' is violated
+ 
+         // First allocation of write id should add the table to the next_write_id meta table.
+         // The initial value for write id should be 1, hence we add 1 to the number of write ids
+         // allocated here.
+         String s = "insert into NEXT_WRITE_ID (nwi_database, nwi_table, nwi_next) values ("
+             + quoteString(rqst.getDbName()) + "," + quoteString(rqst.getTblName()) + "," +
+             Long.toString(rqst.getSeeWriteId() + 1) + ")";
+         LOG.debug("Going to execute insert <" + s + ">");
+         stmt.execute(s);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "seedWriteIdOnAcidConversion(" + rqst + ")");
+         throw new MetaException("Unable to update transaction database "
+             + StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+         if(handle != null) {
+           handle.releaseLocks();
+         }
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       seedWriteIdOnAcidConversion(rqst);
+     }
+ 
+   }
+   @Override
+   @RetrySemantics.Idempotent
+   public void addWriteNotificationLog(AcidWriteEvent acidWriteEvent)
+           throws MetaException {
+     Connection dbConn = null;
+     try {
+       try {
+         //Idempotent case is handled by notifyEvent
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         MetaStoreListenerNotifier.notifyEventWithDirectSql(transactionalListeners,
+                 EventMessage.EventType.ACID_WRITE, acidWriteEvent, dbConn, sqlGenerator);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+         return;
+       } catch (SQLException e) {
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         if (isDuplicateKeyError(e)) {
+           // in case of a duplicate key error, retry as it might be due to a race condition
+           if (waitForRetry("addWriteNotificationLog(" + acidWriteEvent + ")", e.getMessage())) {
+             throw new RetryException();
+           }
+           retryNum = 0;
+           throw new MetaException(e.getMessage());
+         }
+         checkRetryable(dbConn, e, "addWriteNotificationLog(" + acidWriteEvent + ")");
+         throw new MetaException("Unable to add write notification event " + StringUtils.stringifyException(e));
+       } finally{
+         closeDbConn(dbConn);
+         unlockInternal();
+       }
+     } catch (RetryException e) {
+       addWriteNotificationLog(acidWriteEvent);
+     }
+   }
+ 
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void performWriteSetGC() {
+     Connection dbConn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+       stmt = dbConn.createStatement();
+       rs = stmt.executeQuery("select ntxn_next - 1 from NEXT_TXN_ID");
+       if(!rs.next()) {
+         throw new IllegalStateException("NEXT_TXN_ID is empty: DB is corrupted");
+       }
+       long highestAllocatedTxnId = rs.getLong(1);
+       close(rs);
+       rs = stmt.executeQuery("select min(txn_id) from TXNS where txn_state=" + quoteChar(TXN_OPEN));
+       if(!rs.next()) {
+         throw new IllegalStateException("Scalar query returned no rows?!?!!");
+       }
+       long commitHighWaterMark;//all currently open txns (if any) have txnid >= commitHighWaterMark
+       long lowestOpenTxnId = rs.getLong(1);
+       if(rs.wasNull()) {
+         //if here then there are no Open txns and  highestAllocatedTxnId must be
+         //resolved (i.e. committed or aborted), either way
+         //there are no open txns with id <= highestAllocatedTxnId
+       //the +1 is there because "delete ..." below has < (which is correct for the case when
+       //there is an open txn)
+         //Concurrency: even if new txn starts (or starts + commits) it is still true that
+         //there are no currently open txns that overlap with any committed txn with 
+         //commitId <= commitHighWaterMark (as set on next line).  So plain READ_COMMITTED is enough.
+         commitHighWaterMark = highestAllocatedTxnId + 1;
+       }
+       else {
+         commitHighWaterMark = lowestOpenTxnId;
+       }
+       int delCnt = stmt.executeUpdate("delete from WRITE_SET where ws_commit_id < " + commitHighWaterMark);
+       LOG.info("Deleted " + delCnt + " obsolete rows from WRTIE_SET");
+       dbConn.commit();
+     } catch (SQLException ex) {
+       LOG.warn("WriteSet GC failed due to " + getMessage(ex), ex);
+     }
+     finally {
+       close(rs, stmt, dbConn);
+     }
+   }
+ 
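
performWriteSetGC() derives its delete cutoff from two cases: if some txn is open, nothing at or beyond the lowest open txn id can be garbage; if none are open, every commit id up to the highest allocated txn id is obsolete, and the +1 compensates for the strict '<' in the delete. A one-method sketch of that rule, using null to model the SQL NULL that min() returns over an empty set:

    public class WriteSetGcCutoffSketch {
      // Cutoff below which WRITE_SET rows are obsolete: with no open txns, all
      // commit ids up to the highest allocated txn id qualify (+1 because the
      // delete uses a strict '<'); otherwise the lowest open txn id bounds it.
      static long commitHighWaterMark(long highestAllocatedTxnId, Long lowestOpenTxnId) {
        return lowestOpenTxnId == null ? highestAllocatedTxnId + 1 : lowestOpenTxnId;
      }

      public static void main(String[] args) {
        System.out.println(commitHighWaterMark(100, null)); // 101: delete ws_commit_id < 101
        System.out.println(commitHighWaterMark(100, 42L));  // 42: keep anything txn 42 may overlap
      }
    }
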
+   /**
+    * Gets the information of the first transaction for the given table
+    * after the transaction with the input id was committed (if any). 
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit(
+       String inputDbName, String inputTableName, ValidWriteIdList txnList)
+           throws MetaException {
+     final List<Long> openTxns = Arrays.asList(ArrayUtils.toObject(txnList.getInvalidWriteIds()));
+ 
+     Connection dbConn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+       stmt = dbConn.createStatement();
+       stmt.setMaxRows(1);
+       String s = "select ctc_timestamp, ctc_writeid, ctc_database, ctc_table "
+           + "from COMPLETED_TXN_COMPONENTS "
+           + "where ctc_database=" + quoteString(inputDbName) + " and ctc_table=" + quoteString(inputTableName)
+           + " and ctc_writeid > " + txnList.getHighWatermark()
+           + (txnList.getInvalidWriteIds().length == 0 ?
+               " " : " or ctc_writeid IN(" + StringUtils.join(",", openTxns) + ") ")
+           + "order by ctc_timestamp asc";
+       if (LOG.isDebugEnabled()) {
+         LOG.debug("Going to execute query <" + s + ">");
+       }
+       rs = stmt.executeQuery(s);
+ 
+       if(!rs.next()) {
+         return new BasicTxnInfo(true);
+       }
+       final BasicTxnInfo txnInfo = new BasicTxnInfo(false);
+       txnInfo.setTime(rs.getTimestamp(1, Calendar.getInstance(TimeZone.getTimeZone("UTC"))).getTime());
+       txnInfo.setTxnid(rs.getLong(2));
+       txnInfo.setDbname(rs.getString(3));
+       txnInfo.setTablename(rs.getString(4));
+       return txnInfo;
+     } catch (SQLException ex) {
+       LOG.warn("getLastCompletedTransactionForTable failed due to " + getMessage(ex), ex);
+       throw new MetaException("Unable to retrieve commits information due to " + StringUtils.stringifyException(ex));
+     } finally {
+       close(rs, stmt, dbConn);
+     }
+   }
+ 
+   /**
+    * As much as possible (i.e. in absence of retries) we want both operations to be done on the same
+    * connection (but separate transactions).  This avoids some flakiness in BONECP where if you
+    * perform an operation on 1 connection and immediately get another from the pool, the 2nd one
+    * doesn't see results of the first.
+    * 
+    * Retry-by-caller note: If the call to lock is from a transaction, then in the worst case
+    * there will be a duplicate set of locks but both sets will belong to the same txn so they 
+    * will not conflict with each other.  For locks w/o txn context (i.e. read-only query), this
+    * may lead to deadlock (at least a long wait).  (e.g. the 1st call creates locks in {@code LOCK_WAITING}
+    * mode and the response gets lost.  Then {@link org.apache.hadoop.hive.metastore.RetryingMetaStoreClient}
+    * retries, and enqueues another set of locks in LOCK_WAITING.  The 2nd LockResponse is delivered
+    * to the DbLockManager, which will keep doing {@link #checkLock(CheckLockRequest)} until the 1st
+    * set of locks times out.)
+    */
+   @RetrySemantics.CannotRetry
+   public LockResponse lock(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException {
+     ConnectionLockIdPair connAndLockId = enqueueLockWithRetry(rqst);
+     try {
+       return checkLockWithRetry(connAndLockId.dbConn, connAndLockId.extLockId, rqst.getTxnid());
+     }
+     catch(NoSuchLockException e) {
+       // This should never happen, as we just added the lock id
+       throw new MetaException("Couldn't find a lock we just created! " + e.getMessage());
+     }
+   }
+   private static final class ConnectionLockIdPair {
+     private final Connection dbConn;
+     private final long extLockId;
+     private ConnectionLockIdPair(Connection dbConn, long extLockId) {
+       this.dbConn = dbConn;
+       this.extLockId = extLockId;
+     }
+   }
+ 
+   /**
+    * Note that by definition select for update is divorced from update, i.e. you executeQuery() to read
+    * and then executeUpdate().  An alternative would be to actually update the row in TXNS but
+    * to the same value as before, thus forcing the db to acquire a write lock for the duration of the transaction.
+    *
+    * There is no real reason to return the ResultSet here other than to make sure the reference to it
+    * is retained for the duration of the intended lock scope and is not GC'd, which could (however
+    * unlikely) cause the lock to be released.
+    * @param txnState the state this txn is expected to be in.  may be null
+    * @return null if no row was found
+    * @throws SQLException
+    * @throws MetaException
+    */
+   private ResultSet lockTransactionRecord(Statement stmt, long txnId, Character txnState) throws SQLException, MetaException {
+     String query = "select TXN_STATE from TXNS where TXN_ID = " + txnId + (txnState != null ? " AND TXN_STATE=" + quoteChar(txnState) : "");
+     ResultSet rs = stmt.executeQuery(sqlGenerator.addForUpdateClause(query));
+     if(rs.next()) {
+       return rs;
+     }
+     close(rs);
+     return null;
+   }
+ 
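
lockTransactionRecord() is an instance of the standard JDBC pattern for pessimistic row locks: with auto-commit off, SELECT ... FOR UPDATE locks the row until commit or rollback. A generic sketch of that pattern, assuming an H2 in-memory URL and an already-existing TXNS table purely for illustration (the real code goes through addForUpdateClause because FOR UPDATE syntax varies across databases):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.Statement;

    public class SelectForUpdateSketch {
      public static void main(String[] args) throws SQLException {
        // Placeholder URL; assumes the H2 driver is on the classpath and a
        // TXNS table with TXN_ID/TXN_STATE columns already exists.
        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:txns");
             Statement stmt = conn.createStatement()) {
          conn.setAutoCommit(false); // required: the row lock lives until commit/rollback
          try (ResultSet rs = stmt.executeQuery(
              "select TXN_STATE from TXNS where TXN_ID = 42 for update")) {
            if (rs.next()) {
              // The row is locked here: a concurrent commit/abort of txn 42 blocks
              // until we commit or roll back, which is exactly the mutex the
              // surrounding code relies on.
            }
          }
          conn.commit(); // releases the row lock
        }
      }
    }
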
+   /**
+    * This enters locks into the queue in {@link #LOCK_WAITING} mode.
+    *
+    * Isolation Level Notes:
+    * 1. We use S4U (with read_committed) to generate the next (ext) lock id.  This serializes
+    * any 2 {@code enqueueLockWithRetry()} calls.
+    * 2. We use S4U on the relevant TXNS row to block any concurrent abort/commit/etc operations
+    * @see #checkLockWithRetry(Connection, long, long)
+    */
+   private ConnectionLockIdPair enqueueLockWithRetry(LockRequest rqst) throws NoSuchTxnException, TxnAbortedException, MetaException {
+     boolean success = false;
+     Connection dbConn = null;
+     try {
+       Statement stmt = null;
+       ResultSet rs = null;
+       ResultSet lockHandle = null;
+       try {
+         lockInternal();
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         long txnid = rqst.getTxnid();
+         stmt = dbConn.createStatement();
+         if (isValidTxn(txnid)) {
+           //this also ensures that txn is still there in expected state
+           lockHandle = lockTransactionRecord(stmt, txnid, TXN_OPEN);
+           if(lockHandle == null) {
+             ensureValidTxn(dbConn, txnid, stmt);
+             shouldNeverHappen(txnid);
+           }
+         }
+         /** Get the next lock id.
+          * This has to be atomic with adding entries to HIVE_LOCK entries (1st add in W state) to prevent a race.
+          * Suppose ID gen is a separate txn and 2 concurrent lock() methods are running.  1st one generates nl_next=7,
+          * 2nd nl_next=8.  Then 8 goes first to insert into HIVE_LOCKS and acquires the locks.  Then 7 unblocks,
+          * and adds its W locks, but it won't see locks from 8 since, to be 'fair', {@link #checkLock(java.sql.Connection, long)}
+          * doesn't block on locks acquired later than the one it's checking*/
+         String s = sqlGenerator.addForUpdateClause("select nl_next from NEXT_LOCK_ID");
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+           throw new MetaException("Transaction tables not properly " +
+             "initialized, no record found in next_lock_id");
+         }
+         long extLockId = rs.getLong(1);
+         s = "update NEXT_LOCK_ID set nl_next = " + (extLockId + 1);
+         LOG.debug("Going to execute update <" + s + ">");
+         stmt.executeUpdate(s);
+ 
+         if (txnid > 0) {
+           List<String> rows = new ArrayList<>();
+           // For each component in this lock request,
+           // add an entry to the txn_components table
+           for (LockComponent lc : rqst.getComponent()) {
+             if(lc.isSetIsTransactional() && !lc.isIsTransactional()) {
+               //we don't prevent using non-acid resources in a txn but we do lock them
+               continue;
+             }
+             boolean updateTxnComponents;
+             if(!lc.isSetOperationType()) {
+               //request came from old version of the client
+               updateTxnComponents = true;//this matches old behavior
+             }
+             else {


<TRUNCATED>

[72/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
index 0000000,bfd7141..cc417ea
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java
@@@ -1,0 -1,3326 +1,3435 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+ 
+ import java.io.IOException;
+ import java.lang.reflect.Constructor;
+ import java.lang.reflect.InvocationHandler;
+ import java.lang.reflect.InvocationTargetException;
+ import java.lang.reflect.Method;
+ import java.lang.reflect.Proxy;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.net.UnknownHostException;
+ import java.nio.ByteBuffer;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.NoSuchElementException;
+ import java.util.Random;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ 
+ import javax.security.auth.login.LoginException;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.hooks.URIResolverHook;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ReflectionUtils;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.thrift.TApplicationException;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.protocol.TBinaryProtocol;
+ import org.apache.thrift.protocol.TCompactProtocol;
+ import org.apache.thrift.protocol.TProtocol;
+ import org.apache.thrift.transport.TFramedTransport;
+ import org.apache.thrift.transport.TSocket;
+ import org.apache.thrift.transport.TTransport;
+ import org.apache.thrift.transport.TTransportException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * Hive Metastore Client.
+  * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient
+  * are not public and can change. Hence this is marked as unstable.
+  * For users who require a retry mechanism when the connection between the metastore and the
+  * client is broken, the RetryingMetaStoreClient class should be used.
+  */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public class HiveMetaStoreClient implements IMetaStoreClient, AutoCloseable {
+   /**
+    * Capabilities of the current client. If this client talks to a MetaStore server in a manner
+    * implying the usage of some expanded features that require client-side support that this client
+    * doesn't have (e.g. getting a table of a new type), it will get back failures when the
+    * capability checking is enabled (the default).
+    */
+   public final static ClientCapabilities VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES));
+   // Test capability for tests.
+   public final static ClientCapabilities TEST_VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES, ClientCapability.TEST_CAPABILITY));
+ 
+   ThriftHiveMetastore.Iface client = null;
+   private TTransport transport = null;
+   private boolean isConnected = false;
+   private URI[] metastoreUris;
+   private final HiveMetaHookLoader hookLoader;
+   protected final Configuration conf;  // Keep a private copy of the configuration; if the session conf changes, we may need to get a new HMS client.
+   private String tokenStrForm;
+   private final boolean localMetaStore;
+   private final MetaStoreFilterHook filterHook;
+   private final URIResolverHook uriResolverHook;
+   private final int fileMetadataBatchSize;
+ 
+   private Map<String, String> currentMetaVars;
+ 
+   private static final AtomicInteger connCount = new AtomicInteger(0);
+ 
+   // for thrift connects
+   private int retries = 5;
+   private long retryDelaySeconds = 0;
+   private final ClientCapabilities version;
+ 
+   //copied from ErrorMsg.java
+   private static final String REPL_EVENTS_MISSING_IN_METASTORE = "Notification events are missing in the meta store.";
+   
+   static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClient.class);
+ 
+   public HiveMetaStoreClient(Configuration conf) throws MetaException {
+     this(conf, null, true);
+   }
+ 
+   public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException {
+     this(conf, hookLoader, true);
+   }
+ 
+   public HiveMetaStoreClient(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded)
+     throws MetaException {
+ 
+     this.hookLoader = hookLoader;
+     if (conf == null) {
+       conf = MetastoreConf.newMetastoreConf();
+       this.conf = conf;
+     } else {
+       this.conf = new Configuration(conf);
+     }
+     version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION;
+     filterHook = loadFilterHooks();
+     uriResolverHook = loadUriResolverHook();
+     fileMetadataBatchSize = MetastoreConf.getIntVar(
+         conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX);
+ 
+     String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS);
+     localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri);
+     if (localMetaStore) {
+       if (!allowEmbedded) {
+         throw new MetaException("Embedded metastore is not allowed here. Please configure "
+             + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]");
+       }
+       // instantiate the metastore server handler directly instead of connecting
+       // through the network
+       client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
+       // Initialize materializations invalidation cache (only for local metastore)
+       MaterializationsInvalidationCache.get().init(conf, (IHMSHandler) client);
+       isConnected = true;
+       snapshotActiveConf();
+       return;
+     }
+ 
+     // get the number of retries
+     retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES);
+     retryDelaySeconds = MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
+ 
+     // resolve the metastore URIs from the configuration
+     if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) {
+       resolveUris();
+     } else {
+       LOG.error("NOT getting uris from conf");
+       throw new MetaException("MetaStoreURIs not found in conf file");
+     }
+ 
+     // If HADOOP_PROXY_USER is set in the env or as a property,
+     // then we need to create a metastore client that proxies as that user.
+     String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
+     String proxyUser = System.getenv(HADOOP_PROXY_USER);
+     if (proxyUser == null) {
+       proxyUser = System.getProperty(HADOOP_PROXY_USER);
+     }
+     //if HADOOP_PROXY_USER is set, create DelegationToken using real user
+     if(proxyUser != null) {
+       LOG.info(HADOOP_PROXY_USER + " is set. Using delegation "
+           + "token for HiveMetaStore connection.");
+       try {
+         UserGroupInformation.getLoginUser().getRealUser().doAs(
+             new PrivilegedExceptionAction<Void>() {
+               @Override
+               public Void run() throws Exception {
+                 open();
+                 return null;
+               }
+             });
+         String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer";
+         String delegationTokenStr = getDelegationToken(proxyUser, proxyUser);
+         SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr,
+             delegationTokenPropString);
+         MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString);
+         close();
+       } catch (Exception e) {
+         LOG.error("Error while setting delegation token for " + proxyUser, e);
+         if(e instanceof MetaException) {
+           throw (MetaException)e;
+         } else {
+           throw new MetaException(e.getMessage());
+         }
+       }
+     }
+     // finally open the store
+     open();
+   }
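
A minimal sketch of reaching this constructor from a caller (the URIs are
hypothetical; HiveMetaStoreClient implements AutoCloseable, so
try-with-resources closes the connection):

    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS,
        "thrift://ms1.example.com:9083,thrift://ms2.example.com:9083");
    try (HiveMetaStoreClient client = new HiveMetaStoreClient(conf)) {
      // any IMetaStoreClient call works here; getAllDatabases() is just an example
      System.out.println(client.getAllDatabases());
    } catch (TException e) {
      throw new RuntimeException("metastore call failed", e);
    }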
+ 
+   private void resolveUris() throws MetaException {
+     String[] metastoreUrisString = MetastoreConf.getVar(conf,
+             ConfVars.THRIFT_URIS).split(",");
+ 
+     List<URI> metastoreURIArray = new ArrayList<URI>();
+     try {
+       int i = 0;
+       for (String s : metastoreUrisString) {
+         URI tmpUri = new URI(s);
+         if (tmpUri.getScheme() == null) {
+           throw new IllegalArgumentException("URI: " + s
+                   + " does not have a scheme");
+         }
+         if (uriResolverHook != null) {
+           metastoreURIArray.addAll(uriResolverHook.resolveURI(tmpUri));
+         } else {
+           metastoreURIArray.add(new URI(
+                   tmpUri.getScheme(),
+                   tmpUri.getUserInfo(),
+                   HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()),
+                   tmpUri.getPort(),
+                   tmpUri.getPath(),
+                   tmpUri.getQuery(),
+                   tmpUri.getFragment()
+           ));
+         }
+       }
+       metastoreUris = metastoreURIArray.toArray(new URI[0]);
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         // Shuffle a typed copy: shuffling a raw List and casting toArray() to URI[]
+         // breaks on JDKs where Arrays.asList(...).toArray() returns an Object[].
+         List<URI> uriList = new ArrayList<>(Arrays.asList(metastoreUris));
+         Collections.shuffle(uriList);
+         metastoreUris = uriList.toArray(new URI[0]);
+       }
+     } catch (IllegalArgumentException e) {
+       throw (e);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+   }
+ 
+ 
+   private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException {
+     Class<? extends MetaStoreFilterHook> authProviderClass = MetastoreConf.
+         getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class,
+             MetaStoreFilterHook.class);
+     String msg = "Unable to create instance of " + authProviderClass.getName() + ": ";
+     try {
+       Constructor<? extends MetaStoreFilterHook> constructor =
+           authProviderClass.getConstructor(Configuration.class);
+       return constructor.newInstance(conf);
+     } catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException e) {
+       throw new IllegalStateException(msg + e.getMessage(), e);
+     }
+   }
+ 
+   //multiple clients may initialize the hook at the same time
+   synchronized private URIResolverHook loadUriResolverHook() throws IllegalStateException {
+ 
+     String uriResolverClassName =
+             MetastoreConf.getAsString(conf, ConfVars.URI_RESOLVER);
+     if (uriResolverClassName.equals("")) {
+       return null;
+     } else {
+       LOG.info("Loading uri resolver" + uriResolverClassName);
+       try {
+         Class<?> uriResolverClass = Class.forName(uriResolverClassName, true,
+                 JavaUtils.getClassLoader());
+         return (URIResolverHook) ReflectionUtils.newInstance(uriResolverClass, null);
+       } catch (Exception e) {
+         LOG.error("Exception loading uri resolver hook" + e);
+         return null;
+       }
+     }
+   }
+ 
+   /**
+    * Swaps the first element of the metastoreUris array with a random element from the
+    * remainder of the array.
+    */
+   private void promoteRandomMetaStoreURI() {
+     if (metastoreUris.length <= 1) {
+       return;
+     }
+     Random rng = new Random();
+     int index = rng.nextInt(metastoreUris.length - 1) + 1;
+     URI tmp = metastoreUris[0];
+     metastoreUris[0] = metastoreUris[index];
+     metastoreUris[index] = tmp;
+   }
+ 
+   @VisibleForTesting
+   public TTransport getTTransport() {
+     return transport;
+   }
+ 
+   @Override
+   public boolean isLocalMetaStore() {
+     return localMetaStore;
+   }
+ 
+   @Override
+   public boolean isCompatibleWith(Configuration conf) {
+     // Make a copy of currentMetaVars; there is a race condition in that
+     // currentMetaVars might be changed during the execution of this method.
+     Map<String, String> currentMetaVarsCopy = currentMetaVars;
+     if (currentMetaVarsCopy == null) {
+       return false; // recreate
+     }
+     boolean compatible = true;
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       // Since metaVars are all of different types, use string for comparison
+       String oldVar = currentMetaVarsCopy.get(oneVar.getVarname());
+       String newVar = MetastoreConf.getAsString(conf, oneVar);
+       if (oldVar == null ||
+           (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
+         LOG.info("Mestastore configuration " + oneVar.toString() +
+             " changed from " + oldVar + " to " + newVar);
+         compatible = false;
+       }
+     }
+     return compatible;
+   }
+ 
+   @Override
+   public void setHiveAddedJars(String addedJars) {
+     MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars);
+   }
+ 
+   @Override
+   public void reconnect() throws MetaException {
+     if (localMetaStore) {
+       // For direct DB connections we don't yet support reestablishing connections.
+       throw new MetaException("For direct MetaStore DB connections, we don't support retries" +
+           " at the client level.");
+     } else {
+       close();
+ 
+       if (uriResolverHook != null) {
+         //for dynamic uris, re-lookup if there are new metastore locations
+         resolveUris();
+       }
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         // Swap the first element of the metastoreUris[] with a random element from the rest
+         // of the array. Rationale being that this method will generally be called when the default
+         // connection has died and the default connection is likely to be the first array element.
+         promoteRandomMetaStoreURI();
+       }
+       open();
+     }
+   }
+ 
+   @Override
+   public void alter_table(String dbname, String tbl_name, Table new_tbl) throws TException {
+     alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null);
+   }
+ 
+   @Override
+   public void alter_table(String defaultDatabaseName, String tblName, Table table,
+                           boolean cascade) throws TException {
+     EnvironmentContext environmentContext = new EnvironmentContext();
+     if (cascade) {
+       environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+     }
+     alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext);
+   }
+ 
+   @Override
+   public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl,
+       EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+     HiveMetaHook hook = getHook(new_tbl);
+     if (hook != null) {
+       hook.preAlterTable(new_tbl, envContext);
+     }
+     client.alter_table_with_environment_context(prependCatalogToDbName(dbname, conf),
+         tbl_name, new_tbl, envContext);
+   }
+ 
+   @Override
+   public void alter_table(String catName, String dbName, String tblName, Table newTable,
+                          EnvironmentContext envContext) throws TException {
+     client.alter_table_with_environment_context(prependCatalogToDbName(catName,
+         dbName, conf), tblName, newTable, envContext);
+   }
+ 
+   @Override
+   public void renamePartition(final String dbname, final String tableName, final List<String> part_vals,
+                               final Partition newPart) throws TException {
+     renamePartition(getDefaultCatalog(conf), dbname, tableName, part_vals, newPart);
+   }
+ 
+   @Override
+   public void renamePartition(String catName, String dbname, String tableName, List<String> part_vals,
+                               Partition newPart) throws TException {
+     client.rename_partition(prependCatalogToDbName(catName, dbname, conf), tableName, part_vals, newPart);
+   }
+ 
+   private void open() throws MetaException {
+     isConnected = false;
+     TTransportException tte = null;
+     boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL);
+     boolean useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL);
+     boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT);
+     boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL);
+     int clientSocketTimeout = (int) MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
+ 
+     for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
+       for (URI store : metastoreUris) {
+         LOG.info("Trying to connect to metastore with URI " + store);
+ 
+         try {
+           if (useSSL) {
+             try {
+               String trustStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_TRUSTSTORE_PATH).trim();
+               if (trustStorePath.isEmpty()) {
+                 throw new IllegalArgumentException(ConfVars.SSL_TRUSTSTORE_PATH.toString()
+                     + " is not configured for SSL connection");
+               }
+               String trustStorePassword =
+                   MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD);
+ 
+               // Create an SSL socket and connect
+               transport = SecurityUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout,
+                   trustStorePath, trustStorePassword);
+               LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet());
+             } catch(IOException e) {
+               throw new IllegalArgumentException(e);
+             } catch(TTransportException e) {
+               tte = e;
+               throw new MetaException(e.toString());
+             }
+           } else {
+             transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+           }
+ 
+           if (useSasl) {
+             // Wrap thrift connection with SASL for secure connection.
+             try {
+               HadoopThriftAuthBridge.Client authBridge =
+                 HadoopThriftAuthBridge.getBridge().createClient();
+ 
+               // check if we should use delegation tokens to authenticate
+               // the call below gets hold of the tokens if they are set up by hadoop
+               // this should happen on the map/reduce tasks if the client added the
+               // tokens into hadoop's credential store in the front end during job
+               // submission.
+               String tokenSig = MetastoreConf.getVar(conf, ConfVars.TOKEN_SIGNATURE);
+               // tokenSig could be null
+               tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig);
+ 
+               if(tokenStrForm != null) {
+                 LOG.info("HMSC::open(): Found delegation token. Creating DIGEST-based thrift connection.");
+                 // authenticate using delegation tokens via the "DIGEST" mechanism
+                 transport = authBridge.createClientTransport(null, store.getHost(),
+                     "DIGEST", tokenStrForm, transport,
+                         MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               } else {
+                 LOG.info("HMSC::open(): Could not find delegation token. Creating KERBEROS-based thrift connection.");
+                 String principalConfig =
+                     MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL);
+                 transport = authBridge.createClientTransport(
+                     principalConfig, store.getHost(), "KERBEROS", null,
+                     transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               }
+             } catch (IOException ioe) {
+               LOG.error("Couldn't create client transport", ioe);
+               throw new MetaException(ioe.toString());
+             }
+           } else {
+             if (useFramedTransport) {
+               transport = new TFramedTransport(transport);
+             }
+           }
+ 
+           final TProtocol protocol;
+           if (useCompactProtocol) {
+             protocol = new TCompactProtocol(transport);
+           } else {
+             protocol = new TBinaryProtocol(transport);
+           }
+           client = new ThriftHiveMetastore.Client(protocol);
+           try {
+             if (!transport.isOpen()) {
+               transport.open();
+               LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet());
+             }
+             isConnected = true;
+           } catch (TTransportException e) {
+             tte = e;
+             if (LOG.isDebugEnabled()) {
+               LOG.warn("Failed to connect to the MetaStore Server...", e);
+             } else {
+               // Don't print full exception trace if DEBUG is not on.
+               LOG.warn("Failed to connect to the MetaStore Server...");
+             }
+           }
+ 
+           if (isConnected && !useSasl && MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)){
+             // Call set_ugi, only in unsecure mode.
+             try {
+               UserGroupInformation ugi = SecurityUtils.getUGI();
+               client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
+             } catch (LoginException e) {
+               LOG.warn("Failed to do login. set_ugi() is not successful, " +
+                        "Continuing without it.", e);
+             } catch (IOException e) {
+               LOG.warn("Failed to find ugi of client set_ugi() is not successful, " +
+                   "Continuing without it.", e);
+             } catch (TException e) {
+               LOG.warn("set_ugi() not successful, Likely cause: new client talking to old server. "
+                   + "Continuing without it.", e);
+             }
+           }
+         } catch (MetaException e) {
+           LOG.error("Unable to connect to metastore with URI " + store
+                     + " in attempt " + attempt, e);
+         }
+         if (isConnected) {
+           break;
+         }
+       }
+       // Wait before launching the next round of connection retries.
+       if (!isConnected && retryDelaySeconds > 0) {
+         try {
+           LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt.");
+           Thread.sleep(retryDelaySeconds * 1000);
+         } catch (InterruptedException ignore) {}
+       }
+     }
+ 
+     if (!isConnected) {
+       throw new MetaException("Could not connect to meta store using any of the URIs provided." +
+         " Most recent failure: " + StringUtils.stringifyException(tte));
+     }
+ 
+     snapshotActiveConf();
+ 
+     LOG.info("Connected to metastore.");
+   }
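
For quick reference, a sketch of the connection knobs open() consults above
(the setters are MetastoreConf's typed helpers; the values shown are
illustrative, not defaults introduced by this patch):

    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setBoolVar(conf, ConfVars.USE_SSL, false);
    MetastoreConf.setBoolVar(conf, ConfVars.USE_THRIFT_SASL, true);           // DIGEST or KERBEROS
    MetastoreConf.setBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT, false);
    MetastoreConf.setBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL, false);
    MetastoreConf.setLongVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES, 5);
    MetastoreConf.setTimeVar(conf, ConfVars.CLIENT_CONNECT_RETRY_DELAY, 1, TimeUnit.SECONDS);
    MetastoreConf.setTimeVar(conf, ConfVars.CLIENT_SOCKET_TIMEOUT, 600, TimeUnit.SECONDS);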
+ 
+   private void snapshotActiveConf() {
+     currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length);
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar));
+     }
+   }
+ 
+   @Override
+   public String getTokenStrForm() throws IOException {
+     return tokenStrForm;
+   }
+ 
+   @Override
+   public void close() {
+     isConnected = false;
+     currentMetaVars = null;
+     try {
+       if (null != client) {
+         client.shutdown();
+       }
+     } catch (TException e) {
+       LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e);
+     }
+     // The transport should already have been closed via client.shutdown(), so this
+     // isn't strictly needed; we make the call just in case.
+     if ((transport != null) && transport.isOpen()) {
+       transport.close();
+       LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet());
+     }
+   }
+ 
+   @Override
+   public void setMetaConf(String key, String value) throws TException {
+     client.setMetaConf(key, value);
+   }
+ 
+   @Override
+   public String getMetaConf(String key) throws TException {
+     return client.getMetaConf(key);
+   }
+ 
+   @Override
+   public void createCatalog(Catalog catalog) throws TException {
+     client.create_catalog(new CreateCatalogRequest(catalog));
+   }
+ 
+   @Override
+   public void alterCatalog(String catalogName, Catalog newCatalog) throws TException {
+     client.alter_catalog(new AlterCatalogRequest(catalogName, newCatalog));
+   }
+ 
+   @Override
+   public Catalog getCatalog(String catName) throws TException {
+     GetCatalogResponse rsp = client.get_catalog(new GetCatalogRequest(catName));
+     return rsp == null ? null : filterHook.filterCatalog(rsp.getCatalog());
+   }
+ 
+   @Override
+   public List<String> getCatalogs() throws TException {
+     GetCatalogsResponse rsp = client.get_catalogs();
+     return rsp == null ? null : filterHook.filterCatalogs(rsp.getNames());
+   }
+ 
+   @Override
+   public void dropCatalog(String catName) throws TException {
+     client.drop_catalog(new DropCatalogRequest(catName));
+   }
+ 
+   /**
+    * @param new_part
+    * @return the added partition
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition)
+    */
+   @Override
+   public Partition add_partition(Partition new_part) throws TException {
+     return add_partition(new_part, null);
+   }
+ 
+   public Partition add_partition(Partition new_part, EnvironmentContext envContext)
+       throws TException {
+     if (new_part != null && !new_part.isSetCatName()) {
+       new_part.setCatName(getDefaultCatalog(conf));
+     }
+     Partition p = client.add_partition_with_environment_context(new_part, envContext);
+     return deepCopy(p);
+   }
+ 
+   /**
+    * @param new_parts
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
+    */
+   @Override
+   public int add_partitions(List<Partition> new_parts) throws TException {
+     if (new_parts == null || new_parts.contains(null)) {
+       throw new MetaException("Partitions cannot be null.");
+     }
+     if (!new_parts.isEmpty() && !new_parts.get(0).isSetCatName()) {
+       final String defaultCat = getDefaultCatalog(conf);
+       new_parts.forEach(p -> p.setCatName(defaultCat));
+     }
+     return client.add_partitions(new_parts);
+   }
+ 
+   @Override
+   public List<Partition> add_partitions(
+       List<Partition> parts, boolean ifNotExists, boolean needResults) throws TException {
+     if (parts == null || parts.contains(null)) {
+       throw new MetaException("Partitions cannot be null.");
+     }
+     if (parts.isEmpty()) {
+       return needResults ? new ArrayList<>() : null;
+     }
+     Partition part = parts.get(0);
+     // Have to set it for each partition too
+     if (!part.isSetCatName()) {
+       final String defaultCat = getDefaultCatalog(conf);
+       parts.forEach(p -> p.setCatName(defaultCat));
+     }
+     AddPartitionsRequest req = new AddPartitionsRequest(
+         part.getDbName(), part.getTableName(), parts, ifNotExists);
+     req.setCatName(part.isSetCatName() ? part.getCatName() : getDefaultCatalog(conf));
+     req.setNeedResult(needResults);
+     AddPartitionsResult result = client.add_partitions_req(req);
+     return needResults ? filterHook.filterPartitions(result.getPartitions()) : null;
+   }
+ 
+   @Override
+   public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
+     if (partitionSpec == null) {
+       throw new MetaException("PartitionSpec cannot be null.");
+     }
+     if (partitionSpec.getCatName() == null) {
+       partitionSpec.setCatName(getDefaultCatalog(conf));
+     }
+     return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
+   }
+ 
+   @Override
+   public Partition appendPartition(String db_name, String table_name,
+       List<String> part_vals) throws TException {
+     return appendPartition(getDefaultCatalog(conf), db_name, table_name, part_vals);
+   }
+ 
+   @Override
+   public Partition appendPartition(String dbName, String tableName, String partName)
+       throws TException {
+     return appendPartition(getDefaultCatalog(conf), dbName, tableName, partName);
+   }
+ 
+   @Override
+   public Partition appendPartition(String catName, String dbName, String tableName,
+                                    String name) throws TException {
+     Partition p = client.append_partition_by_name(prependCatalogToDbName(
+         catName, dbName, conf), tableName, name);
+     return deepCopy(p);
+   }
+ 
+   @Override
+   public Partition appendPartition(String catName, String dbName, String tableName,
+                                    List<String> partVals) throws TException {
+     Partition p = client.append_partition(prependCatalogToDbName(
+         catName, dbName, conf), tableName, partVals);
+     return deepCopy(p);
+   }
+ 
+   @Deprecated
+   public Partition appendPartition(String dbName, String tableName, List<String> partVals,
+                                    EnvironmentContext ec) throws TException {
+     return client.append_partition_with_environment_context(prependCatalogToDbName(dbName, conf),
+         tableName, partVals, ec).deepCopy();
+   }
+ 
+   /**
+    * Exchange the partition between two tables.
+    * @param partitionSpecs partition specs of the parent partition to be exchanged
+    * @param sourceDb the db of the source table
+    * @param sourceTable the source table name
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return the new partition after exchanging
+    */
+   @Override
+   public Partition exchange_partition(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws TException {
+     return exchange_partition(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable,
+         getDefaultCatalog(conf), destDb, destinationTableName);
+   }
+ 
+   @Override
+   public Partition exchange_partition(Map<String, String> partitionSpecs, String sourceCat,
+                                       String sourceDb, String sourceTable, String destCat,
+                                       String destDb, String destTableName) throws TException {
+     return client.exchange_partition(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf),
+         sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName);
+   }
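
An illustrative call to the convenience overload above (database, table, and
partition names are hypothetical):

    // Move partition ds=2018-07-01 from staging.events to prod.events,
    // using the default catalog on both sides.
    Map<String, String> partitionSpecs = new HashMap<>();
    partitionSpecs.put("ds", "2018-07-01");
    Partition moved = client.exchange_partition(
        partitionSpecs, "staging", "events", "prod", "events");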
+ 
+   /**
+    * Exchange the partitions between two tables.
+    * @param partitionSpecs partition specs of the parent partitions to be exchanged
+    * @param sourceDb the db of the source table
+    * @param sourceTable the source table name
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return the new partitions after exchanging
+    */
+   @Override
+   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws TException {
+     return exchange_partitions(partitionSpecs, getDefaultCatalog(conf), sourceDb, sourceTable,
+         getDefaultCatalog(conf), destDb, destinationTableName);
+   }
+ 
+   @Override
++  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
++      String dbName, String tableName, List<String> partNames, List<String> colNames,
++      long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    return getPartitionColumnStatistics(getDefaultCatalog(conf), dbName, tableName,
++        partNames, colNames, txnId, validWriteIdList);
++  }
++
++  @Override
++  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
++      String catName, String dbName, String tableName, List<String> partNames,
++      List<String> colNames, long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    PartitionsStatsRequest rqst = new PartitionsStatsRequest(dbName, tableName, colNames,
++        partNames);
++    rqst.setCatName(catName);
++    rqst.setTxnId(txnId);
++    rqst.setValidWriteIdList(validWriteIdList);
++    return client.get_partitions_statistics_req(rqst).getPartStats();
++  }
++
++  @Override
++  public AggrStats getAggrColStatsFor(String dbName, String tblName, List<String> colNames,
++      List<String> partNames, long txnId, String writeIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    return getAggrColStatsFor(getDefaultCatalog(conf), dbName, tblName, colNames,
++        partNames, txnId, writeIdList);
++  }
++
++  @Override
++  public AggrStats getAggrColStatsFor(String catName, String dbName, String tblName, List<String> colNames,
++      List<String> partNames, long txnId, String writeIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    if (colNames.isEmpty() || partNames.isEmpty()) {
++      LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side.");
++      return new AggrStats(new ArrayList<>(),0); // Nothing to aggregate
++    }
++    PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames);
++    req.setCatName(catName);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(writeIdList);
++    return client.get_aggr_stats_for(req);
++  }
++
++  @Override
+   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs, String sourceCat,
+                                              String sourceDb, String sourceTable, String destCat,
+                                              String destDb, String destTableName) throws TException {
+     return client.exchange_partitions(partitionSpecs, prependCatalogToDbName(sourceCat, sourceDb, conf),
+         sourceTable, prependCatalogToDbName(destCat, destDb, conf), destTableName);
+   }
+ 
+   @Override
+   public void validatePartitionNameCharacters(List<String> partVals)
+       throws TException, MetaException {
+     client.partition_name_has_valid_characters(partVals, true);
+   }
+ 
+   /**
+    * Create a new Database
+    * @param db
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database)
+    */
+   @Override
+   public void createDatabase(Database db)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
+     if (!db.isSetCatalogName()) {
+       db.setCatalogName(getDefaultCatalog(conf));
+     }
+     client.create_database(db);
+   }
+ 
+   /**
+    * @param tbl
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+    */
+   @Override
+   public void createTable(Table tbl) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     createTable(tbl, null);
+   }
+ 
+   public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     if (!tbl.isSetCatName()) {
+       tbl.setCatName(getDefaultCatalog(conf));
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       create_table_with_environment_context(tbl, envContext);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     }
+     finally {
+       if (!success && (hook != null)) {
+         try {
+           hook.rollbackCreateTable(tbl);
+         } catch (Exception e){
+           LOG.error("Create rollback failed with", e);
+         }
+       }
+     }
+   }
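
The pre/commit/rollback calls above define the contract a HiveMetaHook must
honor; a skeletal implementer might look like this (a sketch showing only the
callbacks this class exercises; the class name is hypothetical):

    public class AuditingMetaHook implements HiveMetaHook {
      @Override public void preCreateTable(Table table) throws MetaException {
        // validate or provision external storage before the metastore call
      }
      @Override public void commitCreateTable(Table table) throws MetaException {
        // finalize external side effects once the metastore call succeeded
      }
      @Override public void rollbackCreateTable(Table table) throws MetaException {
        // undo whatever preCreateTable did; invoked when the metastore call failed
      }
      @Override public void preAlterTable(Table table, EnvironmentContext context) throws MetaException { }
      @Override public void preDropTable(Table table) throws MetaException { }
      @Override public void rollbackDropTable(Table table) throws MetaException { }
      @Override public void commitDropTable(Table table, boolean deleteData) throws MetaException { }
    }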
+ 
+   @Override
+   public void createTableWithConstraints(Table tbl,
+     List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+     List<SQLUniqueConstraint> uniqueConstraints,
+     List<SQLNotNullConstraint> notNullConstraints,
+     List<SQLDefaultConstraint> defaultConstraints,
+     List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, InvalidObjectException,
+         MetaException, NoSuchObjectException, TException {
+ 
+     if (!tbl.isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       tbl.setCatName(defaultCat);
+       if (primaryKeys != null) {
+         primaryKeys.forEach(pk -> pk.setCatName(defaultCat));
+       }
+       if (foreignKeys != null) {
+         foreignKeys.forEach(fk -> fk.setCatName(defaultCat));
+       }
+       if (uniqueConstraints != null) {
+         uniqueConstraints.forEach(uc -> uc.setCatName(defaultCat));
+       }
+       if (notNullConstraints != null) {
+         notNullConstraints.forEach(nn -> nn.setCatName(defaultCat));
+       }
+       if (defaultConstraints != null) {
+         defaultConstraints.forEach(def -> def.setCatName(defaultCat));
+       }
+       if (checkConstraints != null) {
+         checkConstraints.forEach(cc -> cc.setCatName(defaultCat));
+       }
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       client.create_table_with_constraints(tbl, primaryKeys, foreignKeys,
+           uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackCreateTable(tbl);
+       }
+     }
+   }
+ 
+   @Override
+   public void dropConstraint(String dbName, String tableName, String constraintName)
+       throws TException {
+     dropConstraint(getDefaultCatalog(conf), dbName, tableName, constraintName);
+   }
+ 
+   @Override
+   public void dropConstraint(String catName, String dbName, String tableName, String constraintName)
+       throws TException {
+     DropConstraintRequest rqst = new DropConstraintRequest(dbName, tableName, constraintName);
+     rqst.setCatName(catName);
+     client.drop_constraint(rqst);
+   }
+ 
+   @Override
+   public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws TException {
+     if (!primaryKeyCols.isEmpty() && !primaryKeyCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       primaryKeyCols.forEach(pk -> pk.setCatName(defaultCat));
+     }
+     client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols));
+   }
+ 
+   @Override
+   public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws TException {
+     if (!foreignKeyCols.isEmpty() && !foreignKeyCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       foreignKeyCols.forEach(fk -> fk.setCatName(defaultCat));
+     }
+     client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols));
+   }
+ 
+   @Override
+   public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     if (!uniqueConstraintCols.isEmpty() && !uniqueConstraintCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       uniqueConstraintCols.forEach(uc -> uc.setCatName(defaultCat));
+     }
+     client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols));
+   }
+ 
+   @Override
+   public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     if (!notNullConstraintCols.isEmpty() && !notNullConstraintCols.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       notNullConstraintCols.forEach(nn -> nn.setCatName(defaultCat));
+     }
+     client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols));
+   }
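
An illustrative call to addNotNullConstraint() above (all names are
hypothetical; the setters are the Thrift-generated ones on SQLNotNullConstraint):

    SQLNotNullConstraint nn = new SQLNotNullConstraint();
    nn.setTable_db("analytics");
    nn.setTable_name("orders");
    nn.setColumn_name("id");
    nn.setNn_name("orders_id_nn");   // constraint name
    nn.setEnable_cstr(true);         // enforce the constraint
    // catName is deliberately left unset: the method above fills in the default catalog
    client.addNotNullConstraint(Collections.singletonList(nn));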
+ 
+   @Override
+   public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws
+       NoSuchObjectException, MetaException, TException {
+     if (!defaultConstraints.isEmpty() && !defaultConstraints.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       defaultConstraints.forEach(def -> def.setCatName(defaultCat));
+     }
+     client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints));
+   }
+ 
+   @Override
+   public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws
+       NoSuchObjectException, MetaException, TException {
+     if (!checkConstraints.isEmpty() && !checkConstraints.get(0).isSetCatName()) {
+       String defaultCat = getDefaultCatalog(conf);
+       checkConstraints.forEach(cc -> cc.setCatName(defaultCat));
+     }
+     client.add_check_constraint(new AddCheckConstraintRequest(checkConstraints));
+   }
+ 
+   /**
+    * @param type
+    * @return true or false
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type)
+    */
+   public boolean createType(Type type) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, TException {
+     return client.create_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @throws NoSuchObjectException
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
+    */
+   @Override
+   public void dropDatabase(String name)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(getDefaultCatalog(conf), name, true, false, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(getDefaultCatalog(conf), name, deleteData, ignoreUnknownDb, cascade);
+   }
+ 
+   @Override
+   public void dropDatabase(String catalogName, String dbName, boolean deleteData,
+                            boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     try {
+       getDatabase(catalogName, dbName);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownDb) {
+         throw e;
+       }
+       return;
+     }
+ 
+     if (cascade) {
+       // Note that this logic may drop some of the tables of the database
+       // even if the drop database call fails for any reason
+       // TODO: Fix this
+       List<String> materializedViews = getTables(dbName, ".*", TableType.MATERIALIZED_VIEW);
+       for (String table : materializedViews) {
+         // First we delete the materialized views
+         dropTable(dbName, table, deleteData, true);
+       }
+       List<String> tableList = getAllTables(dbName);
+       for (String table : tableList) {
+         // Now we delete the rest of tables
+         try {
+           // Subclasses can override this step (for example, for temporary tables)
+           dropTable(dbName, table, deleteData, true);
+         } catch (UnsupportedOperationException e) {
+           // Ignore index tables; those will be dropped with their parent tables
+         }
+       }
+     }
+     client.drop_database(prependCatalogToDbName(catalogName, dbName, conf), deleteData, cascade);
+   }
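
An illustrative cascade drop through the catalog-aware overload above (the
catalog and database names are hypothetical):

    // Drops scratch_db plus its materialized views and tables, deleting the
    // underlying data and not failing if the database is already gone.
    client.dropDatabase("hive", "scratch_db",
        true /* deleteData */, true /* ignoreUnknownDb */, true /* cascade */);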
+ 
+   @Override
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+       throws TException {
+     return dropPartition(getDefaultCatalog(conf), dbName, tableName, partName, deleteData);
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String db_name, String tbl_name, String name,
+                                boolean deleteData) throws TException {
+     return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName(
+         catName, db_name, conf), tbl_name, name, deleteData, null);
+   }
+ 
+   private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() {
+     Map<String, String> warehouseOptions = new HashMap<>();
+     warehouseOptions.put("ifPurge", "TRUE");
+     return new EnvironmentContext(warehouseOptions);
+   }
+ 
+   // A bunch of these are in HiveMetaStoreClient but not IMetaStoreClient.  I have marked these
+   // as deprecated and not updated them for the catalogs.  If we really want to support them we
+   // should add them to IMetaStoreClient.
+ 
+   @Deprecated
+   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+       EnvironmentContext env_context) throws TException {
+     return client.drop_partition_with_environment_context(prependCatalogToDbName(db_name, conf),
+         tbl_name, part_vals, true, env_context);
+   }
+ 
+   @Deprecated
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean dropData,
+                                EnvironmentContext ec) throws TException {
+     return client.drop_partition_by_name_with_environment_context(prependCatalogToDbName(dbName, conf),
+         tableName, partName, dropData, ec);
+   }
+ 
+   @Deprecated
+   public boolean dropPartition(String dbName, String tableName, List<String> partVals)
+       throws TException {
+     return client.drop_partition(prependCatalogToDbName(dbName, conf), tableName, partVals, true);
+   }
+ 
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, boolean deleteData) throws TException {
+     return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals,
+         PartitionDropOptions.instance().deleteData(deleteData));
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String db_name, String tbl_name,
+                                List<String> part_vals, boolean deleteData) throws TException {
+     return dropPartition(catName, db_name, tbl_name, part_vals, PartitionDropOptions.instance()
+             .deleteData(deleteData));
+   }
+ 
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+                                List<String> part_vals, PartitionDropOptions options) throws TException {
+     return dropPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals, options);
+   }
+ 
+   @Override
+   public boolean dropPartition(String catName, String db_name, String tbl_name,
+                                List<String> part_vals, PartitionDropOptions options)
+       throws TException {
+     if (options == null) {
+       options = PartitionDropOptions.instance();
+     }
+     if (part_vals != null) {
+       for (String partVal : part_vals) {
+         if (partVal == null) {
+           throw new MetaException("The partition value must not be null.");
+         }
+       }
+     }
+     return client.drop_partition_with_environment_context(prependCatalogToDbName(
+         catName, db_name, conf), tbl_name, part_vals, options.deleteData,
+         options.purgeData ? getEnvironmentContextWithIfPurgeSet() : null);
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+                                         List<ObjectPair<Integer, byte[]>> partExprs,
+                                         PartitionDropOptions options)
+       throws TException {
+     return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs, options);
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
+ 
+     return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists)
+                                               .returnResults(needResult));
+ 
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists) throws NoSuchObjectException, MetaException, TException {
+     // By default, we need the results from dropPartitions();
+     return dropPartitions(getDefaultCatalog(conf), dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists));
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String catName, String dbName, String tblName,
+                                         List<ObjectPair<Integer, byte[]>> partExprs,
+                                         PartitionDropOptions options) throws TException {
+     RequestPartsSpec rps = new RequestPartsSpec();
+     List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
+     for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
+       DropPartitionsExpr dpe = new DropPartitionsExpr();
+       dpe.setExpr(partExpr.getSecond());
+       dpe.setPartArchiveLevel(partExpr.getFirst());
+       exprs.add(dpe);
+     }
+     rps.setExprs(exprs);
+     DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
+     req.setCatName(catName);
+     req.setDeleteData(options.deleteData);
+     req.setNeedResult(options.returnResults);
+     req.setIfExists(options.ifExists);
+     if (options.purgeData) {
+       LOG.info("Dropped partitions will be purged!");
+       req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet());
+     }
+     return client.drop_partitions_req(req).getPartitions();
+   }
+ 
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, null);
+   }
+ 
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, boolean ifPurge) throws TException {
+     dropTable(getDefaultCatalog(conf), dbname, name, deleteData, ignoreUnknownTab, ifPurge);
+   }
+ 
+   @Override
+   public void dropTable(String dbname, String name) throws TException {
+     dropTable(getDefaultCatalog(conf), dbname, name, true, true, null);
+   }
+ 
+   @Override
+   public void dropTable(String catName, String dbName, String tableName, boolean deleteData,
+                         boolean ignoreUnknownTable, boolean ifPurge) throws TException {
+     // build a new EnvironmentContext with ifPurge set
+     EnvironmentContext envContext = null;
+     if (ifPurge) {
+       envContext = getEnvironmentContextWithIfPurgeSet();
+     }
+     dropTable(catName, dbName, tableName, deleteData, ignoreUnknownTable, envContext);
+   }
+ 
+   /**
+    * Drop the table and choose whether to: delete the underlying table data;
+    * throw if the table doesn't exist; save the data in the trash.
+    *
+    * @param catName catalog name
+    * @param dbname database name
+    * @param name table name
+    * @param deleteData
+    *          delete the underlying data or just delete the table in metadata
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @param envContext
+    *          for communicating with thrift
+    * @throws MetaException
+    *           could not drop table properly
+    * @throws NoSuchObjectException
+    *           the table wasn't found
+    * @throws TException
+    *           a thrift communication error occurred
+    * @throws UnsupportedOperationException
+    *           dropping an index table is not allowed
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+    *      java.lang.String, boolean)
+    */
+   public void dropTable(String catName, String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     Table tbl;
+     try {
+       tbl = getTable(catName, dbname, name);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+       return;
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preDropTable(tbl);
+     }
+     boolean success = false;
+     try {
+       drop_table_with_environment_context(catName, dbname, name, deleteData, envContext);
+       if (hook != null) {
+         hook.commitDropTable(tbl, deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge"))));
+       }
+       success=true;
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackDropTable(tbl);
+       }
+     }
+   }
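
An illustrative purge-drop through the boolean overload earlier in this block
(the names are hypothetical):

    // Drops the table, deletes its data, and bypasses the trash (ifPurge).
    client.dropTable("hive", "analytics", "tmp_events",
        true /* deleteData */, true /* ignoreUnknownTable */, true /* ifPurge */);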
+ 
+   @Override
+   public void truncateTable(String dbName, String tableName, List<String> partNames) throws TException {
+     truncateTable(getDefaultCatalog(conf), dbName, tableName, partNames);
+   }
+ 
+   @Override
+   public void truncateTable(String catName, String dbName, String tableName, List<String> partNames)
+       throws TException {
+     client.truncate_table(prependCatalogToDbName(catName, dbName, conf), tableName, partNames);
+   }
+ 
+   /**
+    * Recycles the files recursively from the input path to the cmroot directory, either by copying or moving them.
+    *
+    * @param request Inputs: the path of the data files to be recycled to cmroot, and
+    *                an isPurge flag; when set to true, the files to be recycled are not moved to the Trash
+    * @return Response, which is currently void
+    */
+   @Override
+   public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException {
+     return client.cm_recycle(request);
+   }
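
A hedged sketch of driving the recycle call; the (dataPath, purge) constructor
shape of the generated CmRecycleRequest and the path below are assumptions,
and msc again stands for a connected client:

    // Recycle a table directory into cmroot; purge = true is taken to mean
    // the recycled files skip the Trash, per the javadoc above.
    CmRecycleRequest req = new CmRecycleRequest("/warehouse/db1.db/t1", true);
    msc.recycleDirToCmPath(req);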
+ 
+   /**
+    * @param type name of the type to drop
+    * @return true if the type was dropped
+    * @throws NoSuchObjectException no type of that name exists
+    * @throws MetaException error dropping the type
+    * @throws TException a thrift communication error occurred
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String)
+    */
+   public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException {
+     return client.drop_type(type);
+   }
+ 
+   /**
+    * @param name pattern of type names to fetch
+    * @return map of matching types, keyed by type name, or null if the server returned no map
+    * @throws MetaException error fetching the types
+    * @throws TException a thrift communication error occurred
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String)
+    */
+   public Map<String, Type> getTypeAll(String name) throws MetaException,
+       TException {
+     Map<String, Type> result = null;
+     Map<String, Type> fromClient = client.get_type_all(name);
+     if (fromClient != null) {
+       result = new LinkedHashMap<>();
+       // Iterate entries directly rather than looking each key up again.
+       for (Map.Entry<String, Type> entry : fromClient.entrySet()) {
+         result.put(entry.getKey(), deepCopy(entry.getValue()));
+       }
+     }
+     return result;
+   }
+ 
+   @Override
+   public List<String> getDatabases(String databasePattern) throws TException {
+     return getDatabases(getDefaultCatalog(conf), databasePattern);
+   }
+ 
+   @Override
+   public List<String> getDatabases(String catName, String databasePattern) throws TException {
+     return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(
+         catName, databasePattern, conf)));
+   }
+ 
+   @Override
+   public List<String> getAllDatabases() throws TException {
+     return getAllDatabases(getDefaultCatalog(conf));
+   }
+ 
+   @Override
+   public List<String> getAllDatabases(String catName) throws TException {
+     return filterHook.filterDatabases(client.get_databases(prependCatalogToDbName(catName, null, conf)));
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name, short max_parts)
+       throws TException {
+     return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, max_parts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+                                         int max_parts) throws TException {
+     List<Partition> parts = client.get_partitions(prependCatalogToDbName(catName, db_name, conf),
+         tbl_name, shrinkMaxtoShort(max_parts));
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
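
For example, a sketch that caps the result size (identifiers hypothetical,
msc an open client):

    // Fetch at most 100 partitions of "default.sales"; the int limit is
    // narrowed to a short by shrinkMaxtoShort before the Thrift call.
    List<Partition> parts = msc.listPartitions("hive", "default", "sales", 100);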
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException {
+     return listPartitionSpecs(getDefaultCatalog(conf), dbName, tableName, maxParts);
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecs(String catName, String dbName, String tableName,
+                                                int maxParts) throws TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_partitions_pspec(prependCatalogToDbName(catName, dbName, conf), tableName, maxParts)));
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name,
+                                         List<String> part_vals, short max_parts) throws TException {
+     return listPartitions(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String catName, String db_name, String tbl_name,
+                                         List<String> part_vals, int max_parts) throws TException {
+     List<Partition> parts = client.get_partitions_ps(prependCatalogToDbName(catName, db_name, conf),
+         tbl_name, part_vals, shrinkMaxtoShort(max_parts));
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name, String tbl_name,
+                                                     short max_parts, String user_name,
+                                                     List<String> group_names) throws TException {
+     return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, max_parts, user_name,
+         group_names);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                                     int maxParts, String userName,
+                                                     List<String> groupNames) throws TException {
+     List<Partition> parts = client.get_partitions_with_auth(prependCatalogToDbName(catName,
+         dbName, conf), tableName, shrinkMaxtoShort(maxParts), userName, groupNames);
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name, String tbl_name,
+                                                     List<String> part_vals, short max_parts,
+                                                     String user_name, List<String> group_names)
+       throws TException {
+     return listPartitionsWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts,
+         user_name, group_names);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String catName, String dbName, String tableName,
+                                                     List<String> partialPvals, int maxParts,
+                                                     String userName, List<String> groupNames)
+       throws TException {
+     List<Partition> parts = client.get_partitions_ps_with_auth(prependCatalogToDbName(catName,
+         dbName, conf), tableName, partialPvals, shrinkMaxtoShort(maxParts), userName, groupNames);
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+       String filter, short max_parts) throws TException {
+     return listPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts);
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsByFilter(String catName, String db_name, String tbl_name,
+                                                 String filter, int max_parts) throws TException {
+     List<Partition> parts = client.get_partitions_by_filter(prependCatalogToDbName(
+         catName, db_name, conf), tbl_name, filter, shrinkMaxtoShort(max_parts));
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                        String filter, int max_parts)
+       throws TException {
+     return listPartitionSpecsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter, max_parts);
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecsByFilter(String catName, String db_name,
+                                                        String tbl_name, String filter,
+                                                        int max_parts) throws TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_part_specs_by_filter(prependCatalogToDbName(catName, db_name, conf), tbl_name, filter,
+             max_parts)));
+   }
+ 
+   @Override
+   public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr,
+                                       String default_partition_name, short max_parts,
+                                       List<Partition> result) throws TException {
+     return listPartitionsByExpr(getDefaultCatalog(conf), db_name, tbl_name, expr,
+         default_partition_name, max_parts, result);
+   }
+ 
+   @Override
+   public boolean listPartitionsByExpr(String catName, String db_name, String tbl_name, byte[] expr,
+       String default_partition_name, int max_parts, List<Partition> result)
+           throws TException {
+     assert result != null;
+     PartitionsByExprRequest req = new PartitionsByExprRequest(
+         db_name, tbl_name, ByteBuffer.wrap(expr));
+     if (default_partition_name != null) {
+       req.setDefaultPartitionName(default_partition_name);
+     }
+     if (max_parts >= 0) {
+       req.setMaxParts(shrinkMaxtoShort(max_parts));
+     }
+     PartitionsByExprResult r;
+     try {
+       r = client.get_partitions_by_expr(req);
+     } catch (TApplicationException te) {
+       // TODO: backward compat for Hive <= 0.12. Can be removed later.
+       if (te.getType() != TApplicationException.UNKNOWN_METHOD
+           && te.getType() != TApplicationException.WRONG_METHOD_NAME) {
+         throw te;
+       }
+       throw new IncompatibleMetastoreException(
+           "Metastore doesn't support listPartitionsByExpr: " + te.getMessage());
+     }
+     r.setPartitions(filterHook.filterPartitions(r.getPartitions()));
+     // TODO: in these methods, do we really need to deepcopy?
+     deepCopyPartitions(r.getPartitions(), result);
+     return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst.
+   }
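
A sketch of the expression-based listing, assuming exprBytes already holds a
partition-pruning expression serialized by the caller (producing those bytes
is outside this client and not shown):

    List<Partition> matched = new ArrayList<>();
    // true means the result may include partitions the expression could not
    // evaluate, mirroring the "assume the worst" return above.
    boolean hasUnknown = msc.listPartitionsByExpr("hive", "default", "sales",
        exprBytes, "__HIVE_DEFAULT_PARTITION__", -1, matched);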
+ 
+   @Override
+   public Database getDatabase(String name) throws TException {
+     return getDatabase(getDefaultCatalog(conf), name);
+   }
+ 
+   @Override
+   public Database getDatabase(String catalogName, String databaseName) throws TException {
+     Database d = client.get_database(prependCatalogToDbName(catalogName, databaseName, conf));
+     return deepCopy(filterHook.filterDatabase(d));
+   }
+ 
+   @Override
+   public Partition getPartition(String db_name, String tbl_name, List<String> part_vals)
+       throws TException {
+     return getPartition(getDefaultCatalog(conf), db_name, tbl_name, part_vals);
+   }
+ 
+   @Override
+   public Partition getPartition(String catName, String dbName, String tblName,
+                                 List<String> partVals) throws TException {
+     Partition p = client.get_partition(prependCatalogToDbName(catName, dbName, conf), tblName, partVals);
+     return deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+       List<String> part_names) throws TException {
+     return getPartitionsByNames(getDefaultCatalog(conf), db_name, tbl_name, part_names);
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String catName, String db_name, String tbl_name,
+                                               List<String> part_names) throws TException {
+     List<Partition> parts =
+         client.get_partitions_by_names(prependCatalogToDbName(catName, db_name, conf), tbl_name, part_names);
+     return deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+       throws MetaException, TException, NoSuchObjectException {
+     if (!request.isSetCatName()) {
+       request.setCatName(getDefaultCatalog(conf));
+     }
+     return client.get_partition_values(request);
+   }
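
A sketch of a partition-values scan; the (dbName, tblName, partitionKeys)
constructor of the generated PartitionValuesRequest is an assumption, as are
the names:

    // Ask for the distinct values of the "ds" partition column.
    List<FieldSchema> keys =
        Collections.singletonList(new FieldSchema("ds", "string", null));
    PartitionValuesRequest req = new PartitionValuesRequest("default", "sales", keys);
    // The wrapper above fills in the default catalog when catName is unset.
    PartitionValuesResponse resp = msc.listPartitionValues(req);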
+ 
+   @Override
+   public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
+       List<String> part_vals, String user_name, List<String> group_names)
+       throws TException {
+     return getPartitionWithAuthInfo(getDefaultCatalog(conf), db_name, tbl_name, part_vals,
+         user_name, group_names);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuthInfo(String catName, String dbName, String tableName,
+                                             List<String> pvals, String userName,
+                                             List<String> groupNames) throws TException {
+     Partition p = client.get_partition_with_auth(prependCatalogToDbName(catName, dbName, conf), tableName,
+         pvals, userName, groupNames);
+     return deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   @Override
+   public Table getTable(String dbname, String name) throws TException {
+     return getTable(getDefaultCatalog(conf), dbname, name);
+   }
+ 
+   @Override
++  public Table getTable(String dbname, String name,
++                        long txnId, String validWriteIdList)
++      throws MetaException, TException, NoSuchObjectException {
++    return getTable(getDefaultCatalog(conf), dbname, name,
++        txnId, validWriteIdList);
++  }
++
++  @Override
+   public Table getTable(String catName, String dbName, String tableName) throws TException {
+     GetTableRequest req = new GetTableRequest(dbName, tableName);
+     req.setCatName(catName);
+     req.setCapabilities(version);
+     Table t = client.get_table_req(req).getTable();
+     return deepCopy(filterHook.filterTable(t));
+   }
+ 
+   @Override
++  public Table getTable(String catName, String dbName, String tableName,
++                        long txnId, String validWriteIdList) throws TException {
++    GetTableRequest req = new GetTableRequest(dbName, tableName);
++    req.setCatName(catName);
++    req.setCapabilities(version);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIdList);
++    Table t = client.get_table_req(req).getTable();
++    return deepCopy(filterHook.filterTable(t));
++  }
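
A sketch of the transactional read added above; in real use the transaction
id and write-id list come from the caller's open transaction, so both values
below are placeholders:

    long txnId = 42L;             // placeholder transaction id
    String validWriteIds = "...";  // serialized ValidWriteIdList (placeholder)
    Table t = msc.getTable("hive", "default", "sales", txnId, validWriteIds);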
++
++  @Override
+   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+       throws TException {
+     return getTableObjectsByName(getDefaultCatalog(conf), dbName, tableNames);
+   }
+ 
+   @Override
+   public List<Table> getTableObjectsByName(String catName, String dbName,
+                                            List<String> tableNames) throws TException {
+     GetTablesRequest req = new GetTablesRequest(dbName);
+     req.setCatName(catName);
+     req.setTblNames(tableNames);
+     req.setCapabilities(version);
+     List<Table> tabs = client.get_table_objects_by_name_req(req).getTables();
+     return deepCopyTables(filterHook.filterTables(tabs));
+   }
+ 
+   @Override
+   public Map<String, Materialization> getMaterializationsInvalidationInfo(String dbName, List<String> viewNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     return client.get_materialization_invalidation_info(
+         dbName, filterHook.filterTableNames(getDefaultCatalog(conf), dbName, viewNames));
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     client.update_creation_metadata(getDefaultCatalog(conf), dbName, tableName, cm);
+   }
+ 
+   @Override
+   public void updateCreationMetadata(String catName, String dbName, String tableName,
+                                      CreationMetadata cm) throws MetaException, TException {
+     client.update_creation_metadata(catName, dbName, tableName, cm);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+       throws TException {
+     return listTableNamesByFilter(getDefaultCatalog(conf), dbName, filter, maxTables);
+   }
+ 
+   @Override
+   public List<String> listTableNamesByFilter(String catName, String dbName, String filter,
+                                              int maxTables) throws TException {
+     return filterHook.filterTableNames(catName, dbName,
+         client.get_table_names_by_filter(prependCatalogToDbName(catName, dbName, conf), filter,
+             shrinkMaxtoShort(maxTables)));
+   }
+ 
+   /**
+    * @param name
+    * @return the type
+    * @throws MetaException
+    * @throws TException
+    * @throws NoSuchObjectException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String)
+    */
+   public Type getType(String name) throws NoSuchObjectException, MetaException, TException {
+     return deepCopy(client.get_type(name));
+   }
+ 
+   @Override
+   public List<String> getTables(String dbname, String tablePattern) throws MetaException {
+     try {
+       return getTables(getDefaultCatalog(conf), dbname, tablePattern);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String tablePattern)
+       throws TException {
+     return filterHook.filterTableNames(catName, dbName,
+         client.get_tables(prependCatalogToDbName(catName, dbName, conf), tablePattern));
+   }
+ 
+   @Override
+   public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException {
+     try {
+       return getTables(getDefaultCatalog(conf), dbname, tablePattern, tableType);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<String> getTables(String catName, String dbName, String tablePattern,
+                                 TableType tableType) throws TException {
+     return filterHook.filterTableNames(catName, dbName,
+         client.get_tables_by_type(prependCatalogToDbName(catName, dbName, conf), tablePattern,
+             tableType.toString()));
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String dbName) throws TException {
+     return getMaterializedViewsForRewriting(getDefaultCatalog(conf), dbName);
+   }
+ 
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String catName, String dbname)
+       throws MetaException {
+     try {
+       return filterHook.filterTableNames(catName, dbname,
+           client.get_materialized_views_for_rewriting(prependCatalogToDbName(catName, dbname, conf)));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+       throws MetaException {
+     try {
+       return getTableMeta(getDefaultCatalog(conf), dbPatterns, tablePatterns, tableTypes);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String catName, String dbPatterns, String tablePatterns,
+                                       List<String> tableTypes) throws TException {
+     return filterHook.filterTableMetas(client.get_table_meta(prependCatalogToDbName(
+         catName, dbPatterns, conf), tablePatterns, tableTypes));
+   }
+ 
+   @Override
+   public List<String> getAllTables(String dbname) throws MetaException {
+     try {
+       return getAllTables(getDefaultCatalog(conf), dbname);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<String> getAllTables(String catName, String dbName) throws TException {
+     return filterHook.filterTableNames(catName, dbName, client.get_all_tables(
+         prependCatalogToDbName(catName, dbName, conf)));
+   }
+ 
+   @Override
+   public boolean tableExists(String databaseName, String tableName) throws TException {
+     return tableExists(getDefaultCatalog(conf), databaseName, tableName);
+   }
+ 
+   @Override
+   public boolean tableExists(String catName, String dbName, String tableName) throws TException {
+     try {
+       GetTableRequest req = new GetTableRequest(dbName, tableName);
+       req.setCatName(catName);
+       req.setCapabilities(version);
+       return filterHook.filterTable(client.get_table_req(req).getTable()) != null;
+     } catch (NoSuchObjectException e) {
+       return false;
+     }
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String dbName, String tblName,
+       short max) throws NoSuchObjectException, MetaException, TException {
+     return listPartitionNames(getDefaultCatalog(conf), dbName, tblName, max);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tableName,
+                                          int maxParts) throws TException {
+     return filterHook.filterPartitionNames(catName, dbName, tableName,
+         client.get_partition_names(prependCatalogToDbName(catName, dbName, conf), tableName, shrinkMaxtoShort(maxParts)));
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts) throws TException {
+     return listPartitionNames(getDefaultCatalog(conf), db_name, tbl_name, part_vals, max_parts);
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String db_name, String tbl_name,
+                                          List<String> part_vals, int max_parts) throws TException {
+     return filterHook.filterPartitionNames(catName, db_name, tbl_name,
+         client.get_partition_names_ps(prependCatalogToDbName(catName, db_name, conf), tbl_name,
+             part_vals, shrinkMaxtoShort(max_parts)));
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String db_name, String tbl_name,
+                                       String filter) throws TException {
+     return getNumPartitionsByFilter(getDefaultCatalog(conf), db_name, tbl_name, filter);
+   }
+ 
+   @Override
+   public int getNumPartitionsByFilter(String catName, String dbName, String tableName,
+                                       String filter) throws TException {
+     return client.get_num_partitions_by_filter(prependCatalogToDbName(catName, dbName, conf), tableName,
+         filter);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart)
+       throws InvalidOperationException, MetaException, TException {
+     alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, null);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
+       throws InvalidOperationException, MetaException, TException {
+     alter_partition(getDefaultCatalog(conf), dbName, tblName, newPart, environmentContext);
+   }
+ 
+   @Override
+   public void alter_partition(String catName, String dbName, String tblName, Partition newPart,
+                               EnvironmentContext environmentContext) throws TException {
+     client.alter_partition_with_environment_context(prependCatalogToDbName(catName, dbName, conf), tblName,
+         newPart, environmentContext);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+       throws TException {
 -    alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, null);
++    alter_partitions(
++        getDefaultCatalog(conf), dbName, tblName, newParts, new EnvironmentContext(), -1, null, -1);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
+                                EnvironmentContext environmentContext) throws TException {
 -    alter_partitions(getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext);
++    alter_partitions(
++        getDefaultCatalog(conf), dbName, tblName, newParts, environmentContext, -1, null, -1);
++  }
++
++  @Override
++  public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
++                               EnvironmentContext environmentContext,
++                               long txnId, String writeIdList, long writeId)
++      throws InvalidOperationException, MetaException, TException {
++    alter_partitions(getDefaultCatalog(conf),
++        dbName, tblName, newParts, environmentContext, txnId, writeIdList, writeId);
+   }
+ 
+   @Override
+   public void alter_partitions(String catName, String dbName, String tblName,
+                                List<Partition> newParts,
 -                               EnvironmentContext environmentContext) throws TException {
 -    client.alter_partitions_with_environment_context(prependCatalogToDbName(catName, dbName, conf),
 -        tblName, newParts, environmentContext);
++                               EnvironmentContext environmentContext,
++                               long txnId, String writeIdList, long writeId) throws TException {
++    AlterPartitionsRequest req = new AlterPartitionsRequest();
++    req.setDbName(prependCatalogToDbName(catName, dbName, conf));
++    req.setTableName(tblName);
++    req.setPartitions(newParts);
++    req.setEnvironmentContext(environmentContext);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(writeIdList);
++    req.setWriteId(writeId);
++    client.alter_partitions_with_environment_context_req(req);
+   }
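
A sketch of the new request-based path, with updatedParts standing in for an
already-built List<Partition>; the -1/null transaction fields mirror the
non-transactional wrappers above:

    // Alter several partitions in one Thrift round trip; txnId, writeIdList
    // and writeId would carry real ACID state inside a transaction.
    msc.alter_partitions("hive", "default", "sales", updatedParts,
        new EnvironmentContext(), -1, null, -1);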
+ 
+   @Override
+   public void alterDatabase(String dbName, Database db) throws TException {
+     alterDatabase(getDefaultCatalog(conf), dbName, db);
+   }
+ 
+   @Override
+   public void alterDatabase(String catName, String dbName, Database newDb) throws TException {
+     client.alter_database(prependCatalogToDbName(catName, dbName, conf), newDb);
+   }
+ 
+   @Override
+   public List<FieldSchema> getFields(String db, String tableName) throws TException {
+     return getFields(getDefaultCatalog(conf), db, tableName);
+   }
+ 
+   @Override
+   public List<FieldSchema> getFields(String catName, String db, String tableName)
+       throws TException {
+     List<FieldSchema> fields = client.get_fields(prependCatalogToDbName(catName, db, conf), tableName);
+     return deepCopyFieldSchemas(fields);
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest req) throws TException {
+     if (!req.isSetCatName()) {
+       req.setCatName(getDefaultCatalog(conf));
+     }
+     return client.get_primary_keys(req).getPrimaryKeys();
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest req) throws MetaException,
+     NoSuchObjectException, TException {
+     if (!req.isSetCatName()) {
+       req.setCatName(getDefaultCatalog(conf));
+     }
+     return client.get_foreign_keys(req).getForeignKeys();
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(UniqueConstraintsRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     if (!req.isSetCatName()) {
+       req.setCatName(getDefaultCatalog(conf));
+     }
+     return client.get_unique_constraints(req).getUniqueConstraints();
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(NotNullConstraintsRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     if (!req.isSetCatName()) {
+       req.setCatName(getDefaultCatalog(conf));
+     }
+     return client.get_not_null_constraints(req).getNotNullConstraints();
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(DefaultConstraintsRequest req)
+       throws MetaException, NoSuchObjectException, TException {
+     if (!req.isSetCatName()) {
+       req.setCatName(getDefaultCatalog(conf));
+     }
+     return client.get_default_constraints(req).getDefaultConstraints();
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest req)
+       throws MetaException, NoSuchObjectException, TException {
+     if (!req.isSetCatName()) {
+       req.setCatName(getDefaultCatalog(conf));
+     }
+     return client.get_check_constraints(req).getCheckConstraints();
+   }
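
For example, fetching not-null constraints through one of the getters above
(a sketch; the three-argument constructor of the generated
NotNullConstraintsRequest is an assumption):

    NotNullConstraintsRequest req =
        new NotNullConstraintsRequest("hive", "default", "sales");
    // Had catName been left unset, the wrapper above would default it.
    List<SQLNotNullConstraint> notNulls = msc.getNotNullConstraints(req);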
+ 
+   /** {@inheritDoc} */
+   @Override
+   public boolean updateTableColumnStatistics(ColumnStatistics statsObj) throws TException {
+     if (!statsObj.getStatsDesc().isSetCatName()) {
+       statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf));
+     }
+     return client.update_table_column_statistics(statsObj);
+   }
+ 
+   @Override
+   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws TException {
+     if (!statsObj.getStatsDesc().isSetCatName()) {
+       statsObj.getStatsDesc().setCatName(getDefaultCatalog(conf));
+     }
+     return client.update_partition_column_statistics(statsObj);
+   }
+ 
+   @Override
+   public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws TException {
+     String defaultCat = getDefaultCatalog(conf);
+     for (ColumnStatistics stats : request.getColStats()) {
+       if (!stats.getStatsDesc().isSetCatName()) {
+         stats.getStatsDesc().setCatName(defaultCat);
+       }
+     }
+     return client.set_aggr_stats_for(request);
+   }
+ 
+   @Override
+   public void flushCache() {
+     try {
+       client.flushCache();
+     } catch (TException e) {
+       // Not much we can do about it honestly
+       LOG.warn("Got error flushing the cache", e);
+     }
+   }
+ 
+   @Override
+   public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+       List<String> colNames) throws TException {
+     return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames);
+   }
+ 
+   @Override
+   public List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName,
+                                                             String tableName,
+                                                             List<String> colNames) throws TException {
+     TableStatsRequest rqst = new TableStatsRequest(dbName, tableName, colNames);
+     rqst.setCatName(catName);
+     return client.get_table_statistics_req(rqst).getTableStats();
+   }
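
A sketch of fetching table-level column statistics for two columns
(identifiers hypothetical):

    List<ColumnStatisticsObj> stats = msc.getTableColumnStatistics(
        "hive", "default", "sales", Arrays.asList("id", "amount"));
    for (ColumnStatisticsObj o : stats) {
      // Each entry pairs a column name with its typed statistics payload.
      System.out.println(o.getColName() + " -> " + o.getStatsData());
    }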
+ 
+   @Override
++  public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
++                                                            List<String> colNames,
++                                                            long txnId,
++                                                            String validWriteIdList) throws TException {
++    return getTableColumnStatistics(getDefaultCatalog(conf), dbName, tableName, colNames,
++        txnId, validWriteIdList);
++  }
++
++  @Override
++  public List<ColumnStatisticsObj> getTableColumnStatistics(String catName, String dbName,
++                                                            String tableName,
++                                                            List<String> colNames,
++                                                            long txnId,
++                                  

<TRUNCATED>

[23/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java
new file mode 100644
index 0000000..09ca865
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesRequest.java
@@ -0,0 +1,765 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetTablesRequest implements org.apache.thrift.TBase<GetTablesRequest, GetTablesRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetTablesRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTablesRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("tblNames", org.apache.thrift.protocol.TType.LIST, (short)2);
+  private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetTablesRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetTablesRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private List<String> tblNames; // optional
+  private ClientCapabilities capabilities; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TBL_NAMES((short)2, "tblNames"),
+    CAPABILITIES((short)3, "capabilities"),
+    CAT_NAME((short)4, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAMES
+          return TBL_NAMES;
+        case 3: // CAPABILITIES
+          return CAPABILITIES;
+        case 4: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.TBL_NAMES,_Fields.CAPABILITIES,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAMES, new org.apache.thrift.meta_data.FieldMetaData("tblNames", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.CAPABILITIES, new org.apache.thrift.meta_data.FieldMetaData("capabilities", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTablesRequest.class, metaDataMap);
+  }
+
+  public GetTablesRequest() {
+  }
+
+  public GetTablesRequest(
+    String dbName)
+  {
+    this();
+    this.dbName = dbName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetTablesRequest(GetTablesRequest other) {
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTblNames()) {
+      List<String> __this__tblNames = new ArrayList<String>(other.tblNames);
+      this.tblNames = __this__tblNames;
+    }
+    if (other.isSetCapabilities()) {
+      this.capabilities = new ClientCapabilities(other.capabilities);
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public GetTablesRequest deepCopy() {
+    return new GetTablesRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tblNames = null;
+    this.capabilities = null;
+    this.catName = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public int getTblNamesSize() {
+    return (this.tblNames == null) ? 0 : this.tblNames.size();
+  }
+
+  public java.util.Iterator<String> getTblNamesIterator() {
+    return (this.tblNames == null) ? null : this.tblNames.iterator();
+  }
+
+  public void addToTblNames(String elem) {
+    if (this.tblNames == null) {
+      this.tblNames = new ArrayList<String>();
+    }
+    this.tblNames.add(elem);
+  }
+
+  public List<String> getTblNames() {
+    return this.tblNames;
+  }
+
+  public void setTblNames(List<String> tblNames) {
+    this.tblNames = tblNames;
+  }
+
+  public void unsetTblNames() {
+    this.tblNames = null;
+  }
+
+  /** Returns true if field tblNames is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblNames() {
+    return this.tblNames != null;
+  }
+
+  public void setTblNamesIsSet(boolean value) {
+    if (!value) {
+      this.tblNames = null;
+    }
+  }
+
+  public ClientCapabilities getCapabilities() {
+    return this.capabilities;
+  }
+
+  public void setCapabilities(ClientCapabilities capabilities) {
+    this.capabilities = capabilities;
+  }
+
+  public void unsetCapabilities() {
+    this.capabilities = null;
+  }
+
+  /** Returns true if field capabilities is set (has been assigned a value) and false otherwise */
+  public boolean isSetCapabilities() {
+    return this.capabilities != null;
+  }
+
+  public void setCapabilitiesIsSet(boolean value) {
+    if (!value) {
+      this.capabilities = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TBL_NAMES:
+      if (value == null) {
+        unsetTblNames();
+      } else {
+        setTblNames((List<String>)value);
+      }
+      break;
+
+    case CAPABILITIES:
+      if (value == null) {
+        unsetCapabilities();
+      } else {
+        setCapabilities((ClientCapabilities)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TBL_NAMES:
+      return getTblNames();
+
+    case CAPABILITIES:
+      return getCapabilities();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TBL_NAMES:
+      return isSetTblNames();
+    case CAPABILITIES:
+      return isSetCapabilities();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetTablesRequest)
+      return this.equals((GetTablesRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetTablesRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tblNames = true && this.isSetTblNames();
+    boolean that_present_tblNames = true && that.isSetTblNames();
+    if (this_present_tblNames || that_present_tblNames) {
+      if (!(this_present_tblNames && that_present_tblNames))
+        return false;
+      if (!this.tblNames.equals(that.tblNames))
+        return false;
+    }
+
+    boolean this_present_capabilities = true && this.isSetCapabilities();
+    boolean that_present_capabilities = true && that.isSetCapabilities();
+    if (this_present_capabilities || that_present_capabilities) {
+      if (!(this_present_capabilities && that_present_capabilities))
+        return false;
+      if (!this.capabilities.equals(that.capabilities))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tblNames = true && (isSetTblNames());
+    list.add(present_tblNames);
+    if (present_tblNames)
+      list.add(tblNames);
+
+    boolean present_capabilities = true && (isSetCapabilities());
+    list.add(present_capabilities);
+    if (present_capabilities)
+      list.add(capabilities);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetTablesRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTblNames()).compareTo(other.isSetTblNames());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblNames()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblNames, other.tblNames);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCapabilities()).compareTo(other.isSetCapabilities());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCapabilities()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.capabilities, other.capabilities);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetTablesRequest(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (isSetTblNames()) {
+      if (!first) sb.append(", ");
+      sb.append("tblNames:");
+      if (this.tblNames == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.tblNames);
+      }
+      first = false;
+    }
+    if (isSetCapabilities()) {
+      if (!first) sb.append(", ");
+      sb.append("capabilities:");
+      if (this.capabilities == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.capabilities);
+      }
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (capabilities != null) {
+      capabilities.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetTablesRequestStandardSchemeFactory implements SchemeFactory {
+    public GetTablesRequestStandardScheme getScheme() {
+      return new GetTablesRequestStandardScheme();
+    }
+  }
+
+  private static class GetTablesRequestStandardScheme extends StandardScheme<GetTablesRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAMES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list848 = iprot.readListBegin();
+                struct.tblNames = new ArrayList<String>(_list848.size);
+                String _elem849;
+                for (int _i850 = 0; _i850 < _list848.size; ++_i850)
+                {
+                  _elem849 = iprot.readString();
+                  struct.tblNames.add(_elem849);
+                }
+                iprot.readListEnd();
+              }
+              struct.setTblNamesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // CAPABILITIES
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.capabilities = new ClientCapabilities();
+              struct.capabilities.read(iprot);
+              struct.setCapabilitiesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblNames != null) {
+        if (struct.isSetTblNames()) {
+          oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tblNames.size()));
+            for (String _iter851 : struct.tblNames)
+            {
+              oprot.writeString(_iter851);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.capabilities != null) {
+        if (struct.isSetCapabilities()) {
+          oprot.writeFieldBegin(CAPABILITIES_FIELD_DESC);
+          struct.capabilities.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetTablesRequestTupleSchemeFactory implements SchemeFactory {
+    public GetTablesRequestTupleScheme getScheme() {
+      return new GetTablesRequestTupleScheme();
+    }
+  }
+
+  private static class GetTablesRequestTupleScheme extends TupleScheme<GetTablesRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      BitSet optionals = new BitSet();
+      if (struct.isSetTblNames()) {
+        optionals.set(0);
+      }
+      if (struct.isSetCapabilities()) {
+        optionals.set(1);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetTblNames()) {
+        {
+          oprot.writeI32(struct.tblNames.size());
+          for (String _iter852 : struct.tblNames)
+          {
+            oprot.writeString(_iter852);
+          }
+        }
+      }
+      if (struct.isSetCapabilities()) {
+        struct.capabilities.write(oprot);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list853 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.tblNames = new ArrayList<String>(_list853.size);
+          String _elem854;
+          for (int _i855 = 0; _i855 < _list853.size; ++_i855)
+          {
+            _elem854 = iprot.readString();
+            struct.tblNames.add(_elem854);
+          }
+        }
+        struct.setTblNamesIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.capabilities = new ClientCapabilities();
+        struct.capabilities.read(iprot);
+        struct.setCapabilitiesIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+

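The GetTablesRequest schemes above show the two encodings every generated struct carries: the StandardScheme writes self-describing field headers and simply omits unset optionals, while the TupleScheme packs a leading BitSet (three bits here, for tblNames, capabilities, and catName) followed only by the fields whose bits are set. A minimal round-trip sketch, assuming the generated metastore API classes and libthrift 0.9.3 are on the classpath; the demo class name and values are illustrative only:

  import java.util.Arrays;
  import org.apache.hadoop.hive.metastore.api.GetTablesRequest;
  import org.apache.thrift.protocol.TCompactProtocol;
  import org.apache.thrift.transport.TMemoryBuffer;

  public class GetTablesRequestRoundTrip {
    public static void main(String[] args) throws Exception {
      GetTablesRequest req = new GetTablesRequest();
      req.setDbName("default");                     // required field
      req.setTblNames(Arrays.asList("t1", "t2"));   // optional, set
      // capabilities and catName stay unset on purpose.

      TMemoryBuffer buf = new TMemoryBuffer(1024);
      req.write(new TCompactProtocol(buf));         // StandardScheme path

      GetTablesRequest copy = new GetTablesRequest();
      copy.read(new TCompactProtocol(buf));
      System.out.println(copy.isSetCatName());      // false: unset fields survive
      System.out.println(copy.getTblNames());       // [t1, t2]
    }
  }

Passing a TTupleProtocol instead of TCompactProtocol to write/read would select the TupleScheme and exercise the BitSet path shown above.
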
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java
new file mode 100644
index 0000000..72256e6
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTablesResult.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetTablesResult implements org.apache.thrift.TBase<GetTablesResult, GetTablesResult._Fields>, java.io.Serializable, Cloneable, Comparable<GetTablesResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTablesResult");
+
+  private static final org.apache.thrift.protocol.TField TABLES_FIELD_DESC = new org.apache.thrift.protocol.TField("tables", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetTablesResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetTablesResultTupleSchemeFactory());
+  }
+
+  private List<Table> tables; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TABLES((short)1, "tables");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TABLES
+          return TABLES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TABLES, new org.apache.thrift.meta_data.FieldMetaData("tables", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTablesResult.class, metaDataMap);
+  }
+
+  public GetTablesResult() {
+  }
+
+  public GetTablesResult(
+    List<Table> tables)
+  {
+    this();
+    this.tables = tables;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetTablesResult(GetTablesResult other) {
+    if (other.isSetTables()) {
+      List<Table> __this__tables = new ArrayList<Table>(other.tables.size());
+      for (Table other_element : other.tables) {
+        __this__tables.add(new Table(other_element));
+      }
+      this.tables = __this__tables;
+    }
+  }
+
+  public GetTablesResult deepCopy() {
+    return new GetTablesResult(this);
+  }
+
+  @Override
+  public void clear() {
+    this.tables = null;
+  }
+
+  public int getTablesSize() {
+    return (this.tables == null) ? 0 : this.tables.size();
+  }
+
+  public java.util.Iterator<Table> getTablesIterator() {
+    return (this.tables == null) ? null : this.tables.iterator();
+  }
+
+  public void addToTables(Table elem) {
+    if (this.tables == null) {
+      this.tables = new ArrayList<Table>();
+    }
+    this.tables.add(elem);
+  }
+
+  public List<Table> getTables() {
+    return this.tables;
+  }
+
+  public void setTables(List<Table> tables) {
+    this.tables = tables;
+  }
+
+  public void unsetTables() {
+    this.tables = null;
+  }
+
+  /** Returns true if field tables is set (has been assigned a value) and false otherwise */
+  public boolean isSetTables() {
+    return this.tables != null;
+  }
+
+  public void setTablesIsSet(boolean value) {
+    if (!value) {
+      this.tables = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TABLES:
+      if (value == null) {
+        unsetTables();
+      } else {
+        setTables((List<Table>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TABLES:
+      return getTables();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TABLES:
+      return isSetTables();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetTablesResult)
+      return this.equals((GetTablesResult)that);
+    return false;
+  }
+
+  public boolean equals(GetTablesResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_tables = true && this.isSetTables();
+    boolean that_present_tables = true && that.isSetTables();
+    if (this_present_tables || that_present_tables) {
+      if (!(this_present_tables && that_present_tables))
+        return false;
+      if (!this.tables.equals(that.tables))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_tables = true && (isSetTables());
+    list.add(present_tables);
+    if (present_tables)
+      list.add(tables);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetTablesResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTables()).compareTo(other.isSetTables());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTables()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tables, other.tables);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetTablesResult(");
+    boolean first = true;
+
+    sb.append("tables:");
+    if (this.tables == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tables);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTables()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tables' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetTablesResultStandardSchemeFactory implements SchemeFactory {
+    public GetTablesResultStandardScheme getScheme() {
+      return new GetTablesResultStandardScheme();
+    }
+  }
+
+  private static class GetTablesResultStandardScheme extends StandardScheme<GetTablesResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetTablesResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TABLES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list856 = iprot.readListBegin();
+                struct.tables = new ArrayList<Table>(_list856.size);
+                Table _elem857;
+                for (int _i858 = 0; _i858 < _list856.size; ++_i858)
+                {
+                  _elem857 = new Table();
+                  _elem857.read(iprot);
+                  struct.tables.add(_elem857);
+                }
+                iprot.readListEnd();
+              }
+              struct.setTablesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetTablesResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.tables != null) {
+        oprot.writeFieldBegin(TABLES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tables.size()));
+          for (Table _iter859 : struct.tables)
+          {
+            _iter859.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetTablesResultTupleSchemeFactory implements SchemeFactory {
+    public GetTablesResultTupleScheme getScheme() {
+      return new GetTablesResultTupleScheme();
+    }
+  }
+
+  private static class GetTablesResultTupleScheme extends TupleScheme<GetTablesResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.tables.size());
+        for (Table _iter860 : struct.tables)
+        {
+          _iter860.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetTablesResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list861 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.tables = new ArrayList<Table>(_list861.size);
+        Table _elem862;
+        for (int _i863 = 0; _i863 < _list861.size; ++_i863)
+        {
+          _elem862 = new Table();
+          _elem862.read(iprot);
+          struct.tables.add(_elem862);
+        }
+      }
+      struct.setTablesIsSet(true);
+    }
+  }
+
+}
+

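Note the default branch in GetTablesResultStandardScheme.read above: any field id the reader does not recognize is skipped with TProtocolUtil.skip, which is what lets an older reader tolerate structs written by a newer schema. A hand-rolled sketch of that behavior, assuming libthrift 0.9.3; field id 99 stands in for a hypothetical future field:

  import org.apache.hadoop.hive.metastore.api.GetTablesResult;
  import org.apache.thrift.protocol.*;
  import org.apache.thrift.transport.TMemoryBuffer;

  public class SkipUnknownFieldDemo {
    public static void main(String[] args) throws Exception {
      TMemoryBuffer buf = new TMemoryBuffer(256);
      TProtocol oprot = new TCompactProtocol(buf);

      // Write a struct by hand: one unknown field, then an empty tables list.
      oprot.writeStructBegin(new TStruct("GetTablesResult"));
      oprot.writeFieldBegin(new TField("future", TType.STRING, (short) 99));
      oprot.writeString("ignored by older readers");
      oprot.writeFieldEnd();
      oprot.writeFieldBegin(new TField("tables", TType.LIST, (short) 1));
      oprot.writeListBegin(new TList(TType.STRUCT, 0));
      oprot.writeListEnd();
      oprot.writeFieldEnd();
      oprot.writeFieldStop();
      oprot.writeStructEnd();

      GetTablesResult result = new GetTablesResult();
      result.read(new TCompactProtocol(buf));      // field 99 is skipped silently
      System.out.println(result.getTablesSize());  // 0
    }
  }
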
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java
new file mode 100644
index 0000000..af62ca1
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsRequest.java
@@ -0,0 +1,539 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetValidWriteIdsRequest implements org.apache.thrift.TBase<GetValidWriteIdsRequest, GetValidWriteIdsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetValidWriteIdsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetValidWriteIdsRequest");
+
+  private static final org.apache.thrift.protocol.TField FULL_TABLE_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("fullTableNames", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetValidWriteIdsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetValidWriteIdsRequestTupleSchemeFactory());
+  }
+
+  private List<String> fullTableNames; // required
+  private String validTxnList; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FULL_TABLE_NAMES((short)1, "fullTableNames"),
+    VALID_TXN_LIST((short)2, "validTxnList");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FULL_TABLE_NAMES
+          return FULL_TABLE_NAMES;
+        case 2: // VALID_TXN_LIST
+          return VALID_TXN_LIST;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FULL_TABLE_NAMES, new org.apache.thrift.meta_data.FieldMetaData("fullTableNames", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetValidWriteIdsRequest.class, metaDataMap);
+  }
+
+  public GetValidWriteIdsRequest() {
+  }
+
+  public GetValidWriteIdsRequest(
+    List<String> fullTableNames,
+    String validTxnList)
+  {
+    this();
+    this.fullTableNames = fullTableNames;
+    this.validTxnList = validTxnList;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetValidWriteIdsRequest(GetValidWriteIdsRequest other) {
+    if (other.isSetFullTableNames()) {
+      List<String> __this__fullTableNames = new ArrayList<String>(other.fullTableNames);
+      this.fullTableNames = __this__fullTableNames;
+    }
+    if (other.isSetValidTxnList()) {
+      this.validTxnList = other.validTxnList;
+    }
+  }
+
+  public GetValidWriteIdsRequest deepCopy() {
+    return new GetValidWriteIdsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.fullTableNames = null;
+    this.validTxnList = null;
+  }
+
+  public int getFullTableNamesSize() {
+    return (this.fullTableNames == null) ? 0 : this.fullTableNames.size();
+  }
+
+  public java.util.Iterator<String> getFullTableNamesIterator() {
+    return (this.fullTableNames == null) ? null : this.fullTableNames.iterator();
+  }
+
+  public void addToFullTableNames(String elem) {
+    if (this.fullTableNames == null) {
+      this.fullTableNames = new ArrayList<String>();
+    }
+    this.fullTableNames.add(elem);
+  }
+
+  public List<String> getFullTableNames() {
+    return this.fullTableNames;
+  }
+
+  public void setFullTableNames(List<String> fullTableNames) {
+    this.fullTableNames = fullTableNames;
+  }
+
+  public void unsetFullTableNames() {
+    this.fullTableNames = null;
+  }
+
+  /** Returns true if field fullTableNames is set (has been assigned a value) and false otherwise */
+  public boolean isSetFullTableNames() {
+    return this.fullTableNames != null;
+  }
+
+  public void setFullTableNamesIsSet(boolean value) {
+    if (!value) {
+      this.fullTableNames = null;
+    }
+  }
+
+  public String getValidTxnList() {
+    return this.validTxnList;
+  }
+
+  public void setValidTxnList(String validTxnList) {
+    this.validTxnList = validTxnList;
+  }
+
+  public void unsetValidTxnList() {
+    this.validTxnList = null;
+  }
+
+  /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidTxnList() {
+    return this.validTxnList != null;
+  }
+
+  public void setValidTxnListIsSet(boolean value) {
+    if (!value) {
+      this.validTxnList = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FULL_TABLE_NAMES:
+      if (value == null) {
+        unsetFullTableNames();
+      } else {
+        setFullTableNames((List<String>)value);
+      }
+      break;
+
+    case VALID_TXN_LIST:
+      if (value == null) {
+        unsetValidTxnList();
+      } else {
+        setValidTxnList((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FULL_TABLE_NAMES:
+      return getFullTableNames();
+
+    case VALID_TXN_LIST:
+      return getValidTxnList();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FULL_TABLE_NAMES:
+      return isSetFullTableNames();
+    case VALID_TXN_LIST:
+      return isSetValidTxnList();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetValidWriteIdsRequest)
+      return this.equals((GetValidWriteIdsRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetValidWriteIdsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_fullTableNames = true && this.isSetFullTableNames();
+    boolean that_present_fullTableNames = true && that.isSetFullTableNames();
+    if (this_present_fullTableNames || that_present_fullTableNames) {
+      if (!(this_present_fullTableNames && that_present_fullTableNames))
+        return false;
+      if (!this.fullTableNames.equals(that.fullTableNames))
+        return false;
+    }
+
+    boolean this_present_validTxnList = true && this.isSetValidTxnList();
+    boolean that_present_validTxnList = true && that.isSetValidTxnList();
+    if (this_present_validTxnList || that_present_validTxnList) {
+      if (!(this_present_validTxnList && that_present_validTxnList))
+        return false;
+      if (!this.validTxnList.equals(that.validTxnList))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_fullTableNames = true && (isSetFullTableNames());
+    list.add(present_fullTableNames);
+    if (present_fullTableNames)
+      list.add(fullTableNames);
+
+    boolean present_validTxnList = true && (isSetValidTxnList());
+    list.add(present_validTxnList);
+    if (present_validTxnList)
+      list.add(validTxnList);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetValidWriteIdsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetFullTableNames()).compareTo(other.isSetFullTableNames());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFullTableNames()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fullTableNames, other.fullTableNames);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidTxnList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetValidWriteIdsRequest(");
+    boolean first = true;
+
+    sb.append("fullTableNames:");
+    if (this.fullTableNames == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fullTableNames);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("validTxnList:");
+    if (this.validTxnList == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.validTxnList);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetFullTableNames()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'fullTableNames' is unset! Struct:" + toString());
+    }
+
+    if (!isSetValidTxnList()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'validTxnList' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetValidWriteIdsRequestStandardSchemeFactory implements SchemeFactory {
+    public GetValidWriteIdsRequestStandardScheme getScheme() {
+      return new GetValidWriteIdsRequestStandardScheme();
+    }
+  }
+
+  private static class GetValidWriteIdsRequestStandardScheme extends StandardScheme<GetValidWriteIdsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FULL_TABLE_NAMES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list610 = iprot.readListBegin();
+                struct.fullTableNames = new ArrayList<String>(_list610.size);
+                String _elem611;
+                for (int _i612 = 0; _i612 < _list610.size; ++_i612)
+                {
+                  _elem611 = iprot.readString();
+                  struct.fullTableNames.add(_elem611);
+                }
+                iprot.readListEnd();
+              }
+              struct.setFullTableNamesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // VALID_TXN_LIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.validTxnList = iprot.readString();
+              struct.setValidTxnListIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.fullTableNames != null) {
+        oprot.writeFieldBegin(FULL_TABLE_NAMES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.fullTableNames.size()));
+          for (String _iter613 : struct.fullTableNames)
+          {
+            oprot.writeString(_iter613);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.validTxnList != null) {
+        oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC);
+        oprot.writeString(struct.validTxnList);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetValidWriteIdsRequestTupleSchemeFactory implements SchemeFactory {
+    public GetValidWriteIdsRequestTupleScheme getScheme() {
+      return new GetValidWriteIdsRequestTupleScheme();
+    }
+  }
+
+  private static class GetValidWriteIdsRequestTupleScheme extends TupleScheme<GetValidWriteIdsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.fullTableNames.size());
+        for (String _iter614 : struct.fullTableNames)
+        {
+          oprot.writeString(_iter614);
+        }
+      }
+      oprot.writeString(struct.validTxnList);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list615 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.fullTableNames = new ArrayList<String>(_list615.size);
+        String _elem616;
+        for (int _i617 = 0; _i617 < _list615.size; ++_i617)
+        {
+          _elem616 = iprot.readString();
+          struct.fullTableNames.add(_elem616);
+        }
+      }
+      struct.setFullTableNamesIsSet(true);
+      struct.validTxnList = iprot.readString();
+      struct.setValidTxnListIsSet(true);
+    }
+  }
+
+}
+

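Because both of GetValidWriteIdsRequest's fields are required, its TupleScheme needs no BitSet at all: write emits the list length, the names, and the txn list back to back, and read consumes them positionally. That is why the tuple encoding is smaller but not self-describing. A size-comparison sketch, assuming libthrift 0.9.3; the validTxnList string is an illustrative placeholder:

  import java.util.Arrays;
  import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsRequest;
  import org.apache.thrift.protocol.TCompactProtocol;
  import org.apache.thrift.protocol.TTupleProtocol;
  import org.apache.thrift.transport.TMemoryBuffer;

  public class EncodingSizeDemo {
    public static void main(String[] args) throws Exception {
      GetValidWriteIdsRequest req = new GetValidWriteIdsRequest(
          Arrays.asList("default.t1", "default.t2"), "1:9223372036854775807::");

      TMemoryBuffer compact = new TMemoryBuffer(256);
      req.write(new TCompactProtocol(compact));   // field headers included

      TMemoryBuffer tuple = new TMemoryBuffer(256);
      req.write(new TTupleProtocol(tuple));       // positional, no headers

      System.out.println("compact: " + compact.length() + " bytes");
      System.out.println("tuple:   " + tuple.length() + " bytes");
    }
  }
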
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java
new file mode 100644
index 0000000..615a422
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetValidWriteIdsResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetValidWriteIdsResponse implements org.apache.thrift.TBase<GetValidWriteIdsResponse, GetValidWriteIdsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetValidWriteIdsResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetValidWriteIdsResponse");
+
+  private static final org.apache.thrift.protocol.TField TBL_VALID_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("tblValidWriteIds", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetValidWriteIdsResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetValidWriteIdsResponseTupleSchemeFactory());
+  }
+
+  private List<TableValidWriteIds> tblValidWriteIds; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TBL_VALID_WRITE_IDS((short)1, "tblValidWriteIds");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TBL_VALID_WRITE_IDS
+          return TBL_VALID_WRITE_IDS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TBL_VALID_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("tblValidWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableValidWriteIds.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetValidWriteIdsResponse.class, metaDataMap);
+  }
+
+  public GetValidWriteIdsResponse() {
+  }
+
+  public GetValidWriteIdsResponse(
+    List<TableValidWriteIds> tblValidWriteIds)
+  {
+    this();
+    this.tblValidWriteIds = tblValidWriteIds;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetValidWriteIdsResponse(GetValidWriteIdsResponse other) {
+    if (other.isSetTblValidWriteIds()) {
+      List<TableValidWriteIds> __this__tblValidWriteIds = new ArrayList<TableValidWriteIds>(other.tblValidWriteIds.size());
+      for (TableValidWriteIds other_element : other.tblValidWriteIds) {
+        __this__tblValidWriteIds.add(new TableValidWriteIds(other_element));
+      }
+      this.tblValidWriteIds = __this__tblValidWriteIds;
+    }
+  }
+
+  public GetValidWriteIdsResponse deepCopy() {
+    return new GetValidWriteIdsResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.tblValidWriteIds = null;
+  }
+
+  public int getTblValidWriteIdsSize() {
+    return (this.tblValidWriteIds == null) ? 0 : this.tblValidWriteIds.size();
+  }
+
+  public java.util.Iterator<TableValidWriteIds> getTblValidWriteIdsIterator() {
+    return (this.tblValidWriteIds == null) ? null : this.tblValidWriteIds.iterator();
+  }
+
+  public void addToTblValidWriteIds(TableValidWriteIds elem) {
+    if (this.tblValidWriteIds == null) {
+      this.tblValidWriteIds = new ArrayList<TableValidWriteIds>();
+    }
+    this.tblValidWriteIds.add(elem);
+  }
+
+  public List<TableValidWriteIds> getTblValidWriteIds() {
+    return this.tblValidWriteIds;
+  }
+
+  public void setTblValidWriteIds(List<TableValidWriteIds> tblValidWriteIds) {
+    this.tblValidWriteIds = tblValidWriteIds;
+  }
+
+  public void unsetTblValidWriteIds() {
+    this.tblValidWriteIds = null;
+  }
+
+  /** Returns true if field tblValidWriteIds is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblValidWriteIds() {
+    return this.tblValidWriteIds != null;
+  }
+
+  public void setTblValidWriteIdsIsSet(boolean value) {
+    if (!value) {
+      this.tblValidWriteIds = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TBL_VALID_WRITE_IDS:
+      if (value == null) {
+        unsetTblValidWriteIds();
+      } else {
+        setTblValidWriteIds((List<TableValidWriteIds>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TBL_VALID_WRITE_IDS:
+      return getTblValidWriteIds();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TBL_VALID_WRITE_IDS:
+      return isSetTblValidWriteIds();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetValidWriteIdsResponse)
+      return this.equals((GetValidWriteIdsResponse)that);
+    return false;
+  }
+
+  public boolean equals(GetValidWriteIdsResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_tblValidWriteIds = true && this.isSetTblValidWriteIds();
+    boolean that_present_tblValidWriteIds = true && that.isSetTblValidWriteIds();
+    if (this_present_tblValidWriteIds || that_present_tblValidWriteIds) {
+      if (!(this_present_tblValidWriteIds && that_present_tblValidWriteIds))
+        return false;
+      if (!this.tblValidWriteIds.equals(that.tblValidWriteIds))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_tblValidWriteIds = true && (isSetTblValidWriteIds());
+    list.add(present_tblValidWriteIds);
+    if (present_tblValidWriteIds)
+      list.add(tblValidWriteIds);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetValidWriteIdsResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTblValidWriteIds()).compareTo(other.isSetTblValidWriteIds());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblValidWriteIds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblValidWriteIds, other.tblValidWriteIds);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetValidWriteIdsResponse(");
+    boolean first = true;
+
+    sb.append("tblValidWriteIds:");
+    if (this.tblValidWriteIds == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tblValidWriteIds);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTblValidWriteIds()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblValidWriteIds' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetValidWriteIdsResponseStandardSchemeFactory implements SchemeFactory {
+    public GetValidWriteIdsResponseStandardScheme getScheme() {
+      return new GetValidWriteIdsResponseStandardScheme();
+    }
+  }
+
+  private static class GetValidWriteIdsResponseStandardScheme extends StandardScheme<GetValidWriteIdsResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetValidWriteIdsResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TBL_VALID_WRITE_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list626 = iprot.readListBegin();
+                struct.tblValidWriteIds = new ArrayList<TableValidWriteIds>(_list626.size);
+                TableValidWriteIds _elem627;
+                for (int _i628 = 0; _i628 < _list626.size; ++_i628)
+                {
+                  _elem627 = new TableValidWriteIds();
+                  _elem627.read(iprot);
+                  struct.tblValidWriteIds.add(_elem627);
+                }
+                iprot.readListEnd();
+              }
+              struct.setTblValidWriteIdsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetValidWriteIdsResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.tblValidWriteIds != null) {
+        oprot.writeFieldBegin(TBL_VALID_WRITE_IDS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tblValidWriteIds.size()));
+          for (TableValidWriteIds _iter629 : struct.tblValidWriteIds)
+          {
+            _iter629.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetValidWriteIdsResponseTupleSchemeFactory implements SchemeFactory {
+    public GetValidWriteIdsResponseTupleScheme getScheme() {
+      return new GetValidWriteIdsResponseTupleScheme();
+    }
+  }
+
+  private static class GetValidWriteIdsResponseTupleScheme extends TupleScheme<GetValidWriteIdsResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.tblValidWriteIds.size());
+        for (TableValidWriteIds _iter630 : struct.tblValidWriteIds)
+        {
+          _iter630.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetValidWriteIdsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list631 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.tblValidWriteIds = new ArrayList<TableValidWriteIds>(_list631.size);
+        TableValidWriteIds _elem632;
+        for (int _i633 = 0; _i633 < _list631.size; ++_i633)
+        {
+          _elem632 = new TableValidWriteIds();
+          _elem632.read(iprot);
+          struct.tblValidWriteIds.add(_elem632);
+        }
+      }
+      struct.setTblValidWriteIdsIsSet(true);
+    }
+  }
+
+}
+


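Every struct in this batch also bridges java.io serialization to Thrift: the private writeObject/readObject hooks route through a TCompactProtocol over a TIOStreamTransport, so an ObjectOutputStream carries the compact Thrift bytes rather than Java's default field encoding. A round-trip sketch, assuming libthrift 0.9.3; an empty list satisfies the required tblValidWriteIds field:

  import java.io.*;
  import java.util.ArrayList;
  import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse;
  import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;

  public class JavaSerializationBridgeDemo {
    public static void main(String[] args) throws Exception {
      GetValidWriteIdsResponse resp =
          new GetValidWriteIdsResponse(new ArrayList<TableValidWriteIds>());

      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      try (ObjectOutputStream out = new ObjectOutputStream(bytes)) {
        out.writeObject(resp);                   // delegates to Thrift write()
      }

      try (ObjectInputStream in = new ObjectInputStream(
          new ByteArrayInputStream(bytes.toByteArray()))) {
        GetValidWriteIdsResponse copy = (GetValidWriteIdsResponse) in.readObject();
        System.out.println(resp.equals(copy));   // true
      }
    }
  }
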
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
new file mode 100644
index 0000000..b0818bb
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java
@@ -0,0 +1,906 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PrincipalPrivilegeSet implements org.apache.thrift.TBase<PrincipalPrivilegeSet, PrincipalPrivilegeSet._Fields>, java.io.Serializable, Cloneable, Comparable<PrincipalPrivilegeSet> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrincipalPrivilegeSet");
+
+  private static final org.apache.thrift.protocol.TField USER_PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("userPrivileges", org.apache.thrift.protocol.TType.MAP, (short)1);
+  private static final org.apache.thrift.protocol.TField GROUP_PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("groupPrivileges", org.apache.thrift.protocol.TType.MAP, (short)2);
+  private static final org.apache.thrift.protocol.TField ROLE_PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("rolePrivileges", org.apache.thrift.protocol.TType.MAP, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PrincipalPrivilegeSetStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PrincipalPrivilegeSetTupleSchemeFactory());
+  }
+
+  private Map<String,List<PrivilegeGrantInfo>> userPrivileges; // required
+  private Map<String,List<PrivilegeGrantInfo>> groupPrivileges; // required
+  private Map<String,List<PrivilegeGrantInfo>> rolePrivileges; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    USER_PRIVILEGES((short)1, "userPrivileges"),
+    GROUP_PRIVILEGES((short)2, "groupPrivileges"),
+    ROLE_PRIVILEGES((short)3, "rolePrivileges");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // USER_PRIVILEGES
+          return USER_PRIVILEGES;
+        case 2: // GROUP_PRIVILEGES
+          return GROUP_PRIVILEGES;
+        case 3: // ROLE_PRIVILEGES
+          return ROLE_PRIVILEGES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.USER_PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("userPrivileges", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrivilegeGrantInfo.class)))));
+    tmpMap.put(_Fields.GROUP_PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("groupPrivileges", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrivilegeGrantInfo.class)))));
+    tmpMap.put(_Fields.ROLE_PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("rolePrivileges", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrivilegeGrantInfo.class)))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PrincipalPrivilegeSet.class, metaDataMap);
+  }
+
+  public PrincipalPrivilegeSet() {
+  }
+
+  public PrincipalPrivilegeSet(
+    Map<String,List<PrivilegeGrantInfo>> userPrivileges,
+    Map<String,List<PrivilegeGrantInfo>> groupPrivileges,
+    Map<String,List<PrivilegeGrantInfo>> rolePrivileges)
+  {
+    this();
+    this.userPrivileges = userPrivileges;
+    this.groupPrivileges = groupPrivileges;
+    this.rolePrivileges = rolePrivileges;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PrincipalPrivilegeSet(PrincipalPrivilegeSet other) {
+    if (other.isSetUserPrivileges()) {
+      Map<String,List<PrivilegeGrantInfo>> __this__userPrivileges = new HashMap<String,List<PrivilegeGrantInfo>>(other.userPrivileges.size());
+      for (Map.Entry<String, List<PrivilegeGrantInfo>> other_element : other.userPrivileges.entrySet()) {
+
+        String other_element_key = other_element.getKey();
+        List<PrivilegeGrantInfo> other_element_value = other_element.getValue();
+
+        String __this__userPrivileges_copy_key = other_element_key;
+
+        List<PrivilegeGrantInfo> __this__userPrivileges_copy_value = new ArrayList<PrivilegeGrantInfo>(other_element_value.size());
+        for (PrivilegeGrantInfo other_element_value_element : other_element_value) {
+          __this__userPrivileges_copy_value.add(new PrivilegeGrantInfo(other_element_value_element));
+        }
+
+        __this__userPrivileges.put(__this__userPrivileges_copy_key, __this__userPrivileges_copy_value);
+      }
+      this.userPrivileges = __this__userPrivileges;
+    }
+    if (other.isSetGroupPrivileges()) {
+      Map<String,List<PrivilegeGrantInfo>> __this__groupPrivileges = new HashMap<String,List<PrivilegeGrantInfo>>(other.groupPrivileges.size());
+      for (Map.Entry<String, List<PrivilegeGrantInfo>> other_element : other.groupPrivileges.entrySet()) {
+
+        String other_element_key = other_element.getKey();
+        List<PrivilegeGrantInfo> other_element_value = other_element.getValue();
+
+        String __this__groupPrivileges_copy_key = other_element_key;
+
+        List<PrivilegeGrantInfo> __this__groupPrivileges_copy_value = new ArrayList<PrivilegeGrantInfo>(other_element_value.size());
+        for (PrivilegeGrantInfo other_element_value_element : other_element_value) {
+          __this__groupPrivileges_copy_value.add(new PrivilegeGrantInfo(other_element_value_element));
+        }
+
+        __this__groupPrivileges.put(__this__groupPrivileges_copy_key, __this__groupPrivileges_copy_value);
+      }
+      this.groupPrivileges = __this__groupPrivileges;
+    }
+    if (other.isSetRolePrivileges()) {
+      Map<String,List<PrivilegeGrantInfo>> __this__rolePrivileges = new HashMap<String,List<PrivilegeGrantInfo>>(other.rolePrivileges.size());
+      for (Map.Entry<String, List<PrivilegeGrantInfo>> other_element : other.rolePrivileges.entrySet()) {
+
+        String other_element_key = other_element.getKey();
+        List<PrivilegeGrantInfo> other_element_value = other_element.getValue();
+
+        String __this__rolePrivileges_copy_key = other_element_key;
+
+        List<PrivilegeGrantInfo> __this__rolePrivileges_copy_value = new ArrayList<PrivilegeGrantInfo>(other_element_value.size());
+        for (PrivilegeGrantInfo other_element_value_element : other_element_value) {
+          __this__rolePrivileges_copy_value.add(new PrivilegeGrantInfo(other_element_value_element));
+        }
+
+        __this__rolePrivileges.put(__this__rolePrivileges_copy_key, __this__rolePrivileges_copy_value);
+      }
+      this.rolePrivileges = __this__rolePrivileges;
+    }
+  }
+
+  public PrincipalPrivilegeSet deepCopy() {
+    return new PrincipalPrivilegeSet(this);
+  }
+
+  @Override
+  public void clear() {
+    this.userPrivileges = null;
+    this.groupPrivileges = null;
+    this.rolePrivileges = null;
+  }
+
+  public int getUserPrivilegesSize() {
+    return (this.userPrivileges == null) ? 0 : this.userPrivileges.size();
+  }
+
+  public void putToUserPrivileges(String key, List<PrivilegeGrantInfo> val) {
+    if (this.userPrivileges == null) {
+      this.userPrivileges = new HashMap<String,List<PrivilegeGrantInfo>>();
+    }
+    this.userPrivileges.put(key, val);
+  }
+
+  public Map<String,List<PrivilegeGrantInfo>> getUserPrivileges() {
+    return this.userPrivileges;
+  }
+
+  public void setUserPrivileges(Map<String,List<PrivilegeGrantInfo>> userPrivileges) {
+    this.userPrivileges = userPrivileges;
+  }
+
+  public void unsetUserPrivileges() {
+    this.userPrivileges = null;
+  }
+
+  /** Returns true if field userPrivileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetUserPrivileges() {
+    return this.userPrivileges != null;
+  }
+
+  public void setUserPrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.userPrivileges = null;
+    }
+  }
+
+  public int getGroupPrivilegesSize() {
+    return (this.groupPrivileges == null) ? 0 : this.groupPrivileges.size();
+  }
+
+  public void putToGroupPrivileges(String key, List<PrivilegeGrantInfo> val) {
+    if (this.groupPrivileges == null) {
+      this.groupPrivileges = new HashMap<String,List<PrivilegeGrantInfo>>();
+    }
+    this.groupPrivileges.put(key, val);
+  }
+
+  public Map<String,List<PrivilegeGrantInfo>> getGroupPrivileges() {
+    return this.groupPrivileges;
+  }
+
+  public void setGroupPrivileges(Map<String,List<PrivilegeGrantInfo>> groupPrivileges) {
+    this.groupPrivileges = groupPrivileges;
+  }
+
+  public void unsetGroupPrivileges() {
+    this.groupPrivileges = null;
+  }
+
+  /** Returns true if field groupPrivileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetGroupPrivileges() {
+    return this.groupPrivileges != null;
+  }
+
+  public void setGroupPrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.groupPrivileges = null;
+    }
+  }
+
+  public int getRolePrivilegesSize() {
+    return (this.rolePrivileges == null) ? 0 : this.rolePrivileges.size();
+  }
+
+  public void putToRolePrivileges(String key, List<PrivilegeGrantInfo> val) {
+    if (this.rolePrivileges == null) {
+      this.rolePrivileges = new HashMap<String,List<PrivilegeGrantInfo>>();
+    }
+    this.rolePrivileges.put(key, val);
+  }
+
+  public Map<String,List<PrivilegeGrantInfo>> getRolePrivileges() {
+    return this.rolePrivileges;
+  }
+
+  public void setRolePrivileges(Map<String,List<PrivilegeGrantInfo>> rolePrivileges) {
+    this.rolePrivileges = rolePrivileges;
+  }
+
+  public void unsetRolePrivileges() {
+    this.rolePrivileges = null;
+  }
+
+  /** Returns true if field rolePrivileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetRolePrivileges() {
+    return this.rolePrivileges != null;
+  }
+
+  public void setRolePrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.rolePrivileges = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case USER_PRIVILEGES:
+      if (value == null) {
+        unsetUserPrivileges();
+      } else {
+        setUserPrivileges((Map<String,List<PrivilegeGrantInfo>>)value);
+      }
+      break;
+
+    case GROUP_PRIVILEGES:
+      if (value == null) {
+        unsetGroupPrivileges();
+      } else {
+        setGroupPrivileges((Map<String,List<PrivilegeGrantInfo>>)value);
+      }
+      break;
+
+    case ROLE_PRIVILEGES:
+      if (value == null) {
+        unsetRolePrivileges();
+      } else {
+        setRolePrivileges((Map<String,List<PrivilegeGrantInfo>>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case USER_PRIVILEGES:
+      return getUserPrivileges();
+
+    case GROUP_PRIVILEGES:
+      return getGroupPrivileges();
+
+    case ROLE_PRIVILEGES:
+      return getRolePrivileges();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case USER_PRIVILEGES:
+      return isSetUserPrivileges();
+    case GROUP_PRIVILEGES:
+      return isSetGroupPrivileges();
+    case ROLE_PRIVILEGES:
+      return isSetRolePrivileges();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PrincipalPrivilegeSet)
+      return this.equals((PrincipalPrivilegeSet)that);
+    return false;
+  }
+
+  public boolean equals(PrincipalPrivilegeSet that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_userPrivileges = true && this.isSetUserPrivileges();
+    boolean that_present_userPrivileges = true && that.isSetUserPrivileges();
+    if (this_present_userPrivileges || that_present_userPrivileges) {
+      if (!(this_present_userPrivileges && that_present_userPrivileges))
+        return false;
+      if (!this.userPrivileges.equals(that.userPrivileges))
+        return false;
+    }
+
+    boolean this_present_groupPrivileges = true && this.isSetGroupPrivileges();
+    boolean that_present_groupPrivileges = true && that.isSetGroupPrivileges();
+    if (this_present_groupPrivileges || that_present_groupPrivileges) {
+      if (!(this_present_groupPrivileges && that_present_groupPrivileges))
+        return false;
+      if (!this.groupPrivileges.equals(that.groupPrivileges))
+        return false;
+    }
+
+    boolean this_present_rolePrivileges = true && this.isSetRolePrivileges();
+    boolean that_present_rolePrivileges = true && that.isSetRolePrivileges();
+    if (this_present_rolePrivileges || that_present_rolePrivileges) {
+      if (!(this_present_rolePrivileges && that_present_rolePrivileges))
+        return false;
+      if (!this.rolePrivileges.equals(that.rolePrivileges))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_userPrivileges = true && (isSetUserPrivileges());
+    list.add(present_userPrivileges);
+    if (present_userPrivileges)
+      list.add(userPrivileges);
+
+    boolean present_groupPrivileges = true && (isSetGroupPrivileges());
+    list.add(present_groupPrivileges);
+    if (present_groupPrivileges)
+      list.add(groupPrivileges);
+
+    boolean present_rolePrivileges = true && (isSetRolePrivileges());
+    list.add(present_rolePrivileges);
+    if (present_rolePrivileges)
+      list.add(rolePrivileges);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PrincipalPrivilegeSet other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetUserPrivileges()).compareTo(other.isSetUserPrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetUserPrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.userPrivileges, other.userPrivileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGroupPrivileges()).compareTo(other.isSetGroupPrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGroupPrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.groupPrivileges, other.groupPrivileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRolePrivileges()).compareTo(other.isSetRolePrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRolePrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rolePrivileges, other.rolePrivileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PrincipalPrivilegeSet(");
+    boolean first = true;
+
+    sb.append("userPrivileges:");
+    if (this.userPrivileges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.userPrivileges);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("groupPrivileges:");
+    if (this.groupPrivileges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.groupPrivileges);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("rolePrivileges:");
+    if (this.rolePrivileges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.rolePrivileges);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PrincipalPrivilegeSetStandardSchemeFactory implements SchemeFactory {
+    public PrincipalPrivilegeSetStandardScheme getScheme() {
+      return new PrincipalPrivilegeSetStandardScheme();
+    }
+  }
+
+  private static class PrincipalPrivilegeSetStandardScheme extends StandardScheme<PrincipalPrivilegeSet> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PrincipalPrivilegeSet struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // USER_PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map24 = iprot.readMapBegin();
+                struct.userPrivileges = new HashMap<String,List<PrivilegeGrantInfo>>(2*_map24.size);
+                String _key25;
+                List<PrivilegeGrantInfo> _val26;
+                for (int _i27 = 0; _i27 < _map24.size; ++_i27)
+                {
+                  _key25 = iprot.readString();
+                  {
+                    org.apache.thrift.protocol.TList _list28 = iprot.readListBegin();
+                    _val26 = new ArrayList<PrivilegeGrantInfo>(_list28.size);
+                    PrivilegeGrantInfo _elem29;
+                    for (int _i30 = 0; _i30 < _list28.size; ++_i30)
+                    {
+                      _elem29 = new PrivilegeGrantInfo();
+                      _elem29.read(iprot);
+                      _val26.add(_elem29);
+                    }
+                    iprot.readListEnd();
+                  }
+                  struct.userPrivileges.put(_key25, _val26);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setUserPrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // GROUP_PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map31 = iprot.readMapBegin();
+                struct.groupPrivileges = new HashMap<String,List<PrivilegeGrantInfo>>(2*_map31.size);
+                String _key32;
+                List<PrivilegeGrantInfo> _val33;
+                for (int _i34 = 0; _i34 < _map31.size; ++_i34)
+                {
+                  _key32 = iprot.readString();
+                  {
+                    org.apache.thrift.protocol.TList _list35 = iprot.readListBegin();
+                    _val33 = new ArrayList<PrivilegeGrantInfo>(_list35.size);
+                    PrivilegeGrantInfo _elem36;
+                    for (int _i37 = 0; _i37 < _list35.size; ++_i37)
+                    {
+                      _elem36 = new PrivilegeGrantInfo();
+                      _elem36.read(iprot);
+                      _val33.add(_elem36);
+                    }
+                    iprot.readListEnd();
+                  }
+                  struct.groupPrivileges.put(_key32, _val33);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setGroupPrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // ROLE_PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map38 = iprot.readMapBegin();
+                struct.rolePrivileges = new HashMap<String,List<PrivilegeGrantInfo>>(2*_map38.size);
+                String _key39;
+                List<PrivilegeGrantInfo> _val40;
+                for (int _i41 = 0; _i41 < _map38.size; ++_i41)
+                {
+                  _key39 = iprot.readString();
+                  {
+                    org.apache.thrift.protocol.TList _list42 = iprot.readListBegin();
+                    _val40 = new ArrayList<PrivilegeGrantInfo>(_list42.size);
+                    PrivilegeGrantInfo _elem43;
+                    for (int _i44 = 0; _i44 < _list42.size; ++_i44)
+                    {
+                      _elem43 = new PrivilegeGrantInfo();
+                      _elem43.read(iprot);
+                      _val40.add(_elem43);
+                    }
+                    iprot.readListEnd();
+                  }
+                  struct.rolePrivileges.put(_key39, _val40);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setRolePrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PrincipalPrivilegeSet struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.userPrivileges != null) {
+        oprot.writeFieldBegin(USER_PRIVILEGES_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.userPrivileges.size()));
+          for (Map.Entry<String, List<PrivilegeGrantInfo>> _iter45 : struct.userPrivileges.entrySet())
+          {
+            oprot.writeString(_iter45.getKey());
+            {
+              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter45.getValue().size()));
+              for (PrivilegeGrantInfo _iter46 : _iter45.getValue())
+              {
+                _iter46.write(oprot);
+              }
+              oprot.writeListEnd();
+            }
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.groupPrivileges != null) {
+        oprot.writeFieldBegin(GROUP_PRIVILEGES_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.groupPrivileges.size()));
+          for (Map.Entry<String, List<PrivilegeGrantInfo>> _iter47 : struct.groupPrivileges.entrySet())
+          {
+            oprot.writeString(_iter47.getKey());
+            {
+              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter47.getValue().size()));
+              for (PrivilegeGrantInfo _iter48 : _iter47.getValue())
+              {
+                _iter48.write(oprot);
+              }
+              oprot.writeListEnd();
+            }
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.rolePrivileges != null) {
+        oprot.writeFieldBegin(ROLE_PRIVILEGES_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.rolePrivileges.size()));
+          for (Map.Entry<String, List<PrivilegeGrantInfo>> _iter49 : struct.rolePrivileges.entrySet())
+          {
+            oprot.writeString(_iter49.getKey());
+            {
+              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter49.getValue().size()));
+              for (PrivilegeGrantInfo _iter50 : _iter49.getValue())
+              {
+                _iter50.write(oprot);
+              }
+              oprot.writeListEnd();
+            }
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PrincipalPrivilegeSetTupleSchemeFactory implements SchemeFactory {
+    public PrincipalPrivilegeSetTupleScheme getScheme() {
+      return new PrincipalPrivilegeSetTupleScheme();
+    }
+  }
+
+  private static class PrincipalPrivilegeSetTupleScheme extends TupleScheme<PrincipalPrivilegeSet> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PrincipalPrivilegeSet struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetUserPrivileges()) {
+        optionals.set(0);
+      }
+      if (struct.isSetGroupPrivileges()) {
+        optionals.set(1);
+      }
+      if (struct.isSetRolePrivileges()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetUserPrivileges()) {
+        {
+          oprot.writeI32(struct.userPrivileges.size());
+          for (Map.Entry<String, List<PrivilegeGrantInfo>> _iter51 : struct.userPrivileges.entrySet())
+          {
+            oprot.writeString(_iter51.getKey());
+            {
+              oprot.writeI32(_iter51.getValue().size());
+              for (PrivilegeGrantInfo _iter52 : _iter51.getValue())
+              {
+                _iter52.write(oprot);
+              }
+            }
+          }
+        }
+      }
+      if (struct.isSetGroupPrivileges()) {
+        {
+          oprot.writeI32(struct.groupPrivileges.size());
+          for (Map.Entry<String, List<PrivilegeGrantInfo>> _iter53 : struct.groupPrivileges.entrySet())
+          {
+            oprot.writeString(_iter53.getKey());
+            {
+              oprot.writeI32(_iter53.getValue().size());
+              for (PrivilegeGrantInfo _iter54 : _iter53.getValue())
+              {
+                _iter54.write(oprot);
+              }
+            }
+          }
+        }
+      }
+      if (struct.isSetRolePrivileges()) {
+        {
+          oprot.writeI32(struct.rolePrivileges.size());
+          for (Map.Entry<String, List<PrivilegeGrantInfo>> _iter55 : struct.rolePrivileges.entrySet())
+          {
+            oprot.writeString(_iter55.getKey());
+            {
+              oprot.writeI32(_iter55.getValue().size());
+              for (PrivilegeGrantInfo _iter56 : _iter55.getValue())
+              {
+                _iter56.write(oprot);
+              }
+            }
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PrincipalPrivilegeSet struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TMap _map57 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+          struct.userPrivileges = new HashMap<String,List<PrivilegeGrantInfo>>(2*_map57.size);
+          String _key58;
+          List<PrivilegeGrantInfo> _val59;
+          for (int _i60 = 0; _i60 < _map57.size; ++_i60)
+          {
+            _key58 = iprot.readString();
+            {
+              org.apache.thrift.protocol.TList _list61 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+              _val59 = new ArrayList<PrivilegeGrantInfo>(_list61.size);
+              PrivilegeGrantInfo _elem62;
+              for (int _i63 = 0; _i63 < _list61.size; ++_i63)
+              {
+                _elem62 = new PrivilegeGrantInfo();
+                _elem62.read(iprot);
+                _val59.add(_elem62);
+              }
+            }
+            struct.userPrivileges.put(_key58, _val59);
+          }
+        }
+        struct.setUserPrivilegesIsSet(true);
+      }
+      if (incoming.get(1)) {
+        {
+          org.apache.thrift.protocol.TMap _map64 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+          struct.groupPrivileges = new HashMap<String,List<PrivilegeGrantInfo>>(2*_map64.size);
+          String _key65;
+          List<PrivilegeGrantInfo> _val66;
+          for (int _i67 = 0; _i67 < _map64.size; ++_i67)
+          {
+            _key65 = iprot.readString();
+            {
+              org.apache.thrift.protocol.TList _list68 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+              _val66 = new ArrayList<PrivilegeGrantInfo>(_list68.size);
+              PrivilegeGrantInfo _elem69;
+              for (int _i70 = 0; _i70 < _list68.size; ++_i70)
+              {
+                _elem69 = new PrivilegeGrantInfo();
+                _elem69.read(iprot);
+                _val66.add(_elem69);
+              }
+            }
+            struct.groupPrivileges.put(_key65, _val66);
+          }
+        }
+        struct.setGroupPrivilegesIsSet(true);
+      }
+      if (incoming.get(2)) {
+        {
+          org.apache.thrift.protocol.TMap _map71 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+          struct.rolePrivileges = new HashMap<String,List<PrivilegeGrantInfo>>(2*_map71.size);
+          String _key72;
+          List<PrivilegeGrantInfo> _val73;
+          for (int _i74 = 0; _i74 < _map71.size; ++_i74)
+          {
+            _key72 = iprot.readString();
+            {
+              org.apache.thrift.protocol.TList _list75 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+              _val73 = new ArrayList<PrivilegeGrantInfo>(_list75.size);
+              PrivilegeGrantInfo _elem76;
+              for (int _i77 = 0; _i77 < _list75.size; ++_i77)
+              {
+                _elem76 = new PrivilegeGrantInfo();
+                _elem76.read(iprot);
+                _val73.add(_elem76);
+              }
+            }
+            struct.rolePrivileges.put(_key72, _val73);
+          }
+        }
+        struct.setRolePrivilegesIsSet(true);
+      }
+    }
+  }
+
+}
+
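
For orientation, the generated PrincipalPrivilegeSet bean above follows the usual Thrift JavaBean conventions: a no-arg plus an all-args constructor, putTo*/set*/unset*/isSet* accessors per field, and a deep-copy constructor. A minimal usage sketch (illustrative only, not part of this patch; the PrivilegeGrantInfo constructor it relies on appears in a generated file later in this diff, and the createTime unit is not specified by the generated code):

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet;
import org.apache.hadoop.hive.metastore.api.PrincipalType;
import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;

public class PrincipalPrivilegeSetSketch {
  public static void main(String[] args) {
    // All-args order: privilege, createTime (int), grantor, grantorType, grantOption.
    PrivilegeGrantInfo select =
        new PrivilegeGrantInfo("SELECT", 0, "admin", PrincipalType.USER, false);

    PrincipalPrivilegeSet pps = new PrincipalPrivilegeSet();
    // putToUserPrivileges lazily allocates the backing HashMap on first use.
    pps.putToUserPrivileges("alice", Arrays.asList(select));

    System.out.println(pps.isSetUserPrivileges());   // true
    System.out.println(pps.isSetGroupPrivileges());  // false: never assigned
    System.out.println(pps);                         // generated toString() of all three maps
  }
}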

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalType.java
new file mode 100644
index 0000000..82eb8fd
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalType.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum PrincipalType implements org.apache.thrift.TEnum {
+  USER(1),
+  ROLE(2),
+  GROUP(3);
+
+  private final int value;
+
+  private PrincipalType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static PrincipalType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return USER;
+      case 2:
+        return ROLE;
+      case 3:
+        return GROUP;
+      default:
+        return null;
+    }
+  }
+}
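
PrincipalType is a plain TEnum wrapper over the IDL's integer values; note that findByValue returns null for an unmapped id rather than throwing, so callers decoding untrusted wire values should null-check. A small sketch (illustrative only, not part of this patch):

import org.apache.hadoop.hive.metastore.api.PrincipalType;

public class PrincipalTypeSketch {
  public static void main(String[] args) {
    PrincipalType role = PrincipalType.findByValue(2);
    System.out.println(role);             // ROLE
    System.out.println(role.getValue());  // 2, the wire value from the Thrift IDL

    // Unknown ids map to null instead of an exception.
    System.out.println(PrincipalType.findByValue(99)); // null
  }
}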

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
new file mode 100644
index 0000000..a5c8969
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java
@@ -0,0 +1,449 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PrivilegeBag implements org.apache.thrift.TBase<PrivilegeBag, PrivilegeBag._Fields>, java.io.Serializable, Cloneable, Comparable<PrivilegeBag> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrivilegeBag");
+
+  private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PrivilegeBagStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PrivilegeBagTupleSchemeFactory());
+  }
+
+  private List<HiveObjectPrivilege> privileges; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PRIVILEGES((short)1, "privileges");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PRIVILEGES
+          return PRIVILEGES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HiveObjectPrivilege.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PrivilegeBag.class, metaDataMap);
+  }
+
+  public PrivilegeBag() {
+  }
+
+  public PrivilegeBag(
+    List<HiveObjectPrivilege> privileges)
+  {
+    this();
+    this.privileges = privileges;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PrivilegeBag(PrivilegeBag other) {
+    if (other.isSetPrivileges()) {
+      List<HiveObjectPrivilege> __this__privileges = new ArrayList<HiveObjectPrivilege>(other.privileges.size());
+      for (HiveObjectPrivilege other_element : other.privileges) {
+        __this__privileges.add(new HiveObjectPrivilege(other_element));
+      }
+      this.privileges = __this__privileges;
+    }
+  }
+
+  public PrivilegeBag deepCopy() {
+    return new PrivilegeBag(this);
+  }
+
+  @Override
+  public void clear() {
+    this.privileges = null;
+  }
+
+  public int getPrivilegesSize() {
+    return (this.privileges == null) ? 0 : this.privileges.size();
+  }
+
+  public java.util.Iterator<HiveObjectPrivilege> getPrivilegesIterator() {
+    return (this.privileges == null) ? null : this.privileges.iterator();
+  }
+
+  public void addToPrivileges(HiveObjectPrivilege elem) {
+    if (this.privileges == null) {
+      this.privileges = new ArrayList<HiveObjectPrivilege>();
+    }
+    this.privileges.add(elem);
+  }
+
+  public List<HiveObjectPrivilege> getPrivileges() {
+    return this.privileges;
+  }
+
+  public void setPrivileges(List<HiveObjectPrivilege> privileges) {
+    this.privileges = privileges;
+  }
+
+  public void unsetPrivileges() {
+    this.privileges = null;
+  }
+
+  /** Returns true if field privileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrivileges() {
+    return this.privileges != null;
+  }
+
+  public void setPrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.privileges = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PRIVILEGES:
+      if (value == null) {
+        unsetPrivileges();
+      } else {
+        setPrivileges((List<HiveObjectPrivilege>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PRIVILEGES:
+      return getPrivileges();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PRIVILEGES:
+      return isSetPrivileges();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PrivilegeBag)
+      return this.equals((PrivilegeBag)that);
+    return false;
+  }
+
+  public boolean equals(PrivilegeBag that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_privileges = true && this.isSetPrivileges();
+    boolean that_present_privileges = true && that.isSetPrivileges();
+    if (this_present_privileges || that_present_privileges) {
+      if (!(this_present_privileges && that_present_privileges))
+        return false;
+      if (!this.privileges.equals(that.privileges))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_privileges = true && (isSetPrivileges());
+    list.add(present_privileges);
+    if (present_privileges)
+      list.add(privileges);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PrivilegeBag other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPrivileges()).compareTo(other.isSetPrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privileges, other.privileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PrivilegeBag(");
+    boolean first = true;
+
+    sb.append("privileges:");
+    if (this.privileges == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.privileges);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PrivilegeBagStandardSchemeFactory implements SchemeFactory {
+    public PrivilegeBagStandardScheme getScheme() {
+      return new PrivilegeBagStandardScheme();
+    }
+  }
+
+  private static class PrivilegeBagStandardScheme extends StandardScheme<PrivilegeBag> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PrivilegeBag struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list16 = iprot.readListBegin();
+                struct.privileges = new ArrayList<HiveObjectPrivilege>(_list16.size);
+                HiveObjectPrivilege _elem17;
+                for (int _i18 = 0; _i18 < _list16.size; ++_i18)
+                {
+                  _elem17 = new HiveObjectPrivilege();
+                  _elem17.read(iprot);
+                  struct.privileges.add(_elem17);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PrivilegeBag struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.privileges != null) {
+        oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.privileges.size()));
+          for (HiveObjectPrivilege _iter19 : struct.privileges)
+          {
+            _iter19.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PrivilegeBagTupleSchemeFactory implements SchemeFactory {
+    public PrivilegeBagTupleScheme getScheme() {
+      return new PrivilegeBagTupleScheme();
+    }
+  }
+
+  private static class PrivilegeBagTupleScheme extends TupleScheme<PrivilegeBag> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PrivilegeBag struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetPrivileges()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetPrivileges()) {
+        {
+          oprot.writeI32(struct.privileges.size());
+          for (HiveObjectPrivilege _iter20 : struct.privileges)
+          {
+            _iter20.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PrivilegeBag struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.privileges = new ArrayList<HiveObjectPrivilege>(_list21.size);
+          HiveObjectPrivilege _elem22;
+          for (int _i23 = 0; _i23 < _list21.size; ++_i23)
+          {
+            _elem22 = new HiveObjectPrivilege();
+            _elem22.read(iprot);
+            struct.privileges.add(_elem22);
+          }
+        }
+        struct.setPrivilegesIsSet(true);
+      }
+    }
+  }
+
+}
+
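
PrivilegeBag's read/write methods dispatch through the static schemes map above, so the same bean serializes with either the standard (field-tagged) or tuple encoding depending on the protocol in use. A round-trip sketch with the standard scheme (illustrative only, not part of this patch; it assumes HiveObjectPrivilege, which is not shown in this hunk, follows the same generated-bean conventions):

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
import org.apache.hadoop.hive.metastore.api.PrivilegeBag;

public class PrivilegeBagSketch {
  public static void main(String[] args) throws TException {
    PrivilegeBag bag = new PrivilegeBag();
    // addToPrivileges lazily allocates the backing ArrayList on first use.
    bag.addToPrivileges(new HiveObjectPrivilege());
    System.out.println(bag.getPrivilegesSize());     // 1

    // TBinaryProtocol selects the StandardScheme registered in the schemes map.
    TMemoryBuffer buf = new TMemoryBuffer(256);
    bag.write(new TBinaryProtocol(buf));

    PrivilegeBag decoded = new PrivilegeBag();
    decoded.read(new TBinaryProtocol(buf));
    System.out.println(bag.equals(decoded));         // true: generated field-by-field equals
  }
}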

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
new file mode 100644
index 0000000..8ae118c
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeGrantInfo.java
@@ -0,0 +1,815 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PrivilegeGrantInfo implements org.apache.thrift.TBase<PrivilegeGrantInfo, PrivilegeGrantInfo._Fields>, java.io.Serializable, Cloneable, Comparable<PrivilegeGrantInfo> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrivilegeGrantInfo");
+
+  private static final org.apache.thrift.protocol.TField PRIVILEGE_FIELD_DESC = new org.apache.thrift.protocol.TField("privilege", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField GRANTOR_FIELD_DESC = new org.apache.thrift.protocol.TField("grantor", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField GRANTOR_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("grantorType", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField GRANT_OPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("grantOption", org.apache.thrift.protocol.TType.BOOL, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PrivilegeGrantInfoStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PrivilegeGrantInfoTupleSchemeFactory());
+  }
+
+  private String privilege; // required
+  private int createTime; // required
+  private String grantor; // required
+  private PrincipalType grantorType; // required
+  private boolean grantOption; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PRIVILEGE((short)1, "privilege"),
+    CREATE_TIME((short)2, "createTime"),
+    GRANTOR((short)3, "grantor"),
+    /**
+     * 
+     * @see PrincipalType
+     */
+    GRANTOR_TYPE((short)4, "grantorType"),
+    GRANT_OPTION((short)5, "grantOption");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PRIVILEGE
+          return PRIVILEGE;
+        case 2: // CREATE_TIME
+          return CREATE_TIME;
+        case 3: // GRANTOR
+          return GRANTOR;
+        case 4: // GRANTOR_TYPE
+          return GRANTOR_TYPE;
+        case 5: // GRANT_OPTION
+          return GRANT_OPTION;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __CREATETIME_ISSET_ID = 0;
+  private static final int __GRANTOPTION_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PRIVILEGE, new org.apache.thrift.meta_data.FieldMetaData("privilege", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.GRANTOR, new org.apache.thrift.meta_data.FieldMetaData("grantor", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.GRANTOR_TYPE, new org.apache.thrift.meta_data.FieldMetaData("grantorType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+    tmpMap.put(_Fields.GRANT_OPTION, new org.apache.thrift.meta_data.FieldMetaData("grantOption", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PrivilegeGrantInfo.class, metaDataMap);
+  }
+
+  public PrivilegeGrantInfo() {
+  }
+
+  public PrivilegeGrantInfo(
+    String privilege,
+    int createTime,
+    String grantor,
+    PrincipalType grantorType,
+    boolean grantOption)
+  {
+    this();
+    this.privilege = privilege;
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+    this.grantor = grantor;
+    this.grantorType = grantorType;
+    this.grantOption = grantOption;
+    setGrantOptionIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PrivilegeGrantInfo(PrivilegeGrantInfo other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetPrivilege()) {
+      this.privilege = other.privilege;
+    }
+    this.createTime = other.createTime;
+    if (other.isSetGrantor()) {
+      this.grantor = other.grantor;
+    }
+    if (other.isSetGrantorType()) {
+      this.grantorType = other.grantorType;
+    }
+    this.grantOption = other.grantOption;
+  }
+
+  public PrivilegeGrantInfo deepCopy() {
+    return new PrivilegeGrantInfo(this);
+  }
+
+  @Override
+  public void clear() {
+    this.privilege = null;
+    setCreateTimeIsSet(false);
+    this.createTime = 0;
+    this.grantor = null;
+    this.grantorType = null;
+    setGrantOptionIsSet(false);
+    this.grantOption = false;
+  }
+
+  public String getPrivilege() {
+    return this.privilege;
+  }
+
+  public void setPrivilege(String privilege) {
+    this.privilege = privilege;
+  }
+
+  public void unsetPrivilege() {
+    this.privilege = null;
+  }
+
+  /** Returns true if field privilege is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrivilege() {
+    return this.privilege != null;
+  }
+
+  public void setPrivilegeIsSet(boolean value) {
+    if (!value) {
+      this.privilege = null;
+    }
+  }
+
+  public int getCreateTime() {
+    return this.createTime;
+  }
+
+  public void setCreateTime(int createTime) {
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+  }
+
+  public void unsetCreateTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  /** Returns true if field createTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetCreateTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  public void setCreateTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
+  }
+
+  public String getGrantor() {
+    return this.grantor;
+  }
+
+  public void setGrantor(String grantor) {
+    this.grantor = grantor;
+  }
+
+  public void unsetGrantor() {
+    this.grantor = null;
+  }
+
+  /** Returns true if field grantor is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantor() {
+    return this.grantor != null;
+  }
+
+  public void setGrantorIsSet(boolean value) {
+    if (!value) {
+      this.grantor = null;
+    }
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public PrincipalType getGrantorType() {
+    return this.grantorType;
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public void setGrantorType(PrincipalType grantorType) {
+    this.grantorType = grantorType;
+  }
+
+  public void unsetGrantorType() {
+    this.grantorType = null;
+  }
+
+  /** Returns true if field grantorType is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantorType() {
+    return this.grantorType != null;
+  }
+
+  public void setGrantorTypeIsSet(boolean value) {
+    if (!value) {
+      this.grantorType = null;
+    }
+  }
+
+  public boolean isGrantOption() {
+    return this.grantOption;
+  }
+
+  public void setGrantOption(boolean grantOption) {
+    this.grantOption = grantOption;
+    setGrantOptionIsSet(true);
+  }
+
+  public void unsetGrantOption() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __GRANTOPTION_ISSET_ID);
+  }
+
+  /** Returns true if field grantOption is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantOption() {
+    return EncodingUtils.testBit(__isset_bitfield, __GRANTOPTION_ISSET_ID);
+  }
+
+  public void setGrantOptionIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __GRANTOPTION_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PRIVILEGE:
+      if (value == null) {
+        unsetPrivilege();
+      } else {
+        setPrivilege((String)value);
+      }
+      break;
+
+    case CREATE_TIME:
+      if (value == null) {
+        unsetCreateTime();
+      } else {
+        setCreateTime((Integer)value);
+      }
+      break;
+
+    case GRANTOR:
+      if (value == null) {
+        unsetGrantor();
+      } else {
+        setGrantor((String)value);
+      }
+      break;
+
+    case GRANTOR_TYPE:
+      if (value == null) {
+        unsetGrantorType();
+      } else {
+        setGrantorType((PrincipalType)value);
+      }
+      break;
+
+    case GRANT_OPTION:
+      if (value == null) {
+        unsetGrantOption();
+      } else {
+        setGrantOption((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PRIVILEGE:
+      return getPrivilege();
+
+    case CREATE_TIME:
+      return getCreateTime();
+
+    case GRANTOR:
+      return getGrantor();
+
+    case GRANTOR_TYPE:
+      return getGrantorType();
+
+    case GRANT_OPTION:
+      return isGrantOption();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PRIVILEGE:
+      return isSetPrivilege();
+    case CREATE_TIME:
+      return isSetCreateTime();
+    case GRANTOR:
+      return isSetGrantor();
+    case GRANTOR_TYPE:
+      return isSetGrantorType();
+    case GRANT_OPTION:
+      return isSetGrantOption();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PrivilegeGrantInfo)
+      return this.equals((PrivilegeGrantInfo)that);
+    return false;
+  }
+
+  public boolean equals(PrivilegeGrantInfo that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_privilege = true && this.isSetPrivilege();
+    boolean that_present_privilege = true && that.isSetPrivilege();
+    if (this_present_privilege || that_present_privilege) {
+      if (!(this_present_privilege && that_present_privilege))
+        return false;
+      if (!this.privilege.equals(that.privilege))
+        return false;
+    }
+
+    boolean this_present_createTime = true;
+    boolean that_present_createTime = true;
+    if (this_present_createTime || that_present_createTime) {
+      if (!(this_present_createTime && that_present_createTime))
+        return false;
+      if (this.createTime != that.createTime)
+        return false;
+    }
+
+    boolean this_present_grantor = true && this.isSetGrantor();
+    boolean that_present_grantor = true && that.isSetGrantor();
+    if (this_present_grantor || that_present_grantor) {
+      if (!(this_present_grantor && that_present_grantor))
+        return false;
+      if (!this.grantor.equals(that.grantor))
+        return false;
+    }
+
+    boolean this_present_grantorType = true && this.isSetGrantorType();
+    boolean that_present_grantorType = true && that.isSetGrantorType();
+    if (this_present_grantorType || that_present_grantorType) {
+      if (!(this_present_grantorType && that_present_grantorType))
+        return false;
+      if (!this.grantorType.equals(that.grantorType))
+        return false;
+    }
+
+    boolean this_present_grantOption = true;
+    boolean that_present_grantOption = true;
+    if (this_present_grantOption || that_present_grantOption) {
+      if (!(this_present_grantOption && that_present_grantOption))
+        return false;
+      if (this.grantOption != that.grantOption)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_privilege = true && (isSetPrivilege());
+    list.add(present_privilege);
+    if (present_privilege)
+      list.add(privilege);
+
+    boolean present_createTime = true;
+    list.add(present_createTime);
+    if (present_createTime)
+      list.add(createTime);
+
+    boolean present_grantor = true && (isSetGrantor());
+    list.add(present_grantor);
+    if (present_grantor)
+      list.add(grantor);
+
+    boolean present_grantorType = true && (isSetGrantorType());
+    list.add(present_grantorType);
+    if (present_grantorType)
+      list.add(grantorType.getValue());
+
+    boolean present_grantOption = true;
+    list.add(present_grantOption);
+    if (present_grantOption)
+      list.add(grantOption);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PrivilegeGrantInfo other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPrivilege()).compareTo(other.isSetPrivilege());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrivilege()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privilege, other.privilege);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCreateTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantor()).compareTo(other.isSetGrantor());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantor()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantor, other.grantor);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantorType()).compareTo(other.isSetGrantorType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantorType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantorType, other.grantorType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantOption()).compareTo(other.isSetGrantOption());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantOption()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantOption, other.grantOption);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PrivilegeGrantInfo(");
+    boolean first = true;
+
+    sb.append("privilege:");
+    if (this.privilege == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.privilege);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("createTime:");
+    sb.append(this.createTime);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("grantor:");
+    if (this.grantor == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.grantor);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("grantorType:");
+    if (this.grantorType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.grantorType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("grantOption:");
+    sb.append(this.grantOption);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization bypasses the default constructor, so the isset bitfield must be reset explicitly before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PrivilegeGrantInfoStandardSchemeFactory implements SchemeFactory {
+    public PrivilegeGrantInfoStandardScheme getScheme() {
+      return new PrivilegeGrantInfoStandardScheme();
+    }
+  }
+
+  private static class PrivilegeGrantInfoStandardScheme extends StandardScheme<PrivilegeGrantInfo> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PrivilegeGrantInfo struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PRIVILEGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.privilege = iprot.readString();
+              struct.setPrivilegeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // CREATE_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.createTime = iprot.readI32();
+              struct.setCreateTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // GRANTOR
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.grantor = iprot.readString();
+              struct.setGrantorIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // GRANTOR_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.grantorType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+              struct.setGrantorTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // GRANT_OPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.grantOption = iprot.readBool();
+              struct.setGrantOptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PrivilegeGrantInfo struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.privilege != null) {
+        oprot.writeFieldBegin(PRIVILEGE_FIELD_DESC);
+        oprot.writeString(struct.privilege);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
+      oprot.writeI32(struct.createTime);
+      oprot.writeFieldEnd();
+      if (struct.grantor != null) {
+        oprot.writeFieldBegin(GRANTOR_FIELD_DESC);
+        oprot.writeString(struct.grantor);
+        oprot.writeFieldEnd();
+      }
+      if (struct.grantorType != null) {
+        oprot.writeFieldBegin(GRANTOR_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.grantorType.getValue());
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(GRANT_OPTION_FIELD_DESC);
+      oprot.writeBool(struct.grantOption);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PrivilegeGrantInfoTupleSchemeFactory implements SchemeFactory {
+    public PrivilegeGrantInfoTupleScheme getScheme() {
+      return new PrivilegeGrantInfoTupleScheme();
+    }
+  }
+
+  private static class PrivilegeGrantInfoTupleScheme extends TupleScheme<PrivilegeGrantInfo> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PrivilegeGrantInfo struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetPrivilege()) {
+        optionals.set(0);
+      }
+      if (struct.isSetCreateTime()) {
+        optionals.set(1);
+      }
+      if (struct.isSetGrantor()) {
+        optionals.set(2);
+      }
+      if (struct.isSetGrantorType()) {
+        optionals.set(3);
+      }
+      if (struct.isSetGrantOption()) {
+        optionals.set(4);
+      }
+      oprot.writeBitSet(optionals, 5);
+      if (struct.isSetPrivilege()) {
+        oprot.writeString(struct.privilege);
+      }
+      if (struct.isSetCreateTime()) {
+        oprot.writeI32(struct.createTime);
+      }
+      if (struct.isSetGrantor()) {
+        oprot.writeString(struct.grantor);
+      }
+      if (struct.isSetGrantorType()) {
+        oprot.writeI32(struct.grantorType.getValue());
+      }
+      if (struct.isSetGrantOption()) {
+        oprot.writeBool(struct.grantOption);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PrivilegeGrantInfo struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(5);
+      if (incoming.get(0)) {
+        struct.privilege = iprot.readString();
+        struct.setPrivilegeIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.createTime = iprot.readI32();
+        struct.setCreateTimeIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.grantor = iprot.readString();
+        struct.setGrantorIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.grantorType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+        struct.setGrantorTypeIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.grantOption = iprot.readBool();
+        struct.setGrantOptionIsSet(true);
+      }
+    }
+  }
+
+}
+
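
For orientation, the generated struct round-trips through the Thrift runtime as in the minimal sketch below. This is not part of the patch: it assumes the Thrift 0.9.3 runtime and the generated metastore classes are on the classpath, and the wrapper class name plus the "SELECT"/"admin" values are invented for illustration. TCompactProtocol reports StandardScheme, so this path exercises the PrivilegeGrantInfoStandardScheme defined above.

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.hadoop.hive.metastore.api.PrincipalType;
    import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;

    public class PrivilegeGrantInfoRoundTrip {
      public static void main(String[] args) throws TException {
        // The all-args constructor also flips the isset bits for the primitive fields.
        PrivilegeGrantInfo info = new PrivilegeGrantInfo(
            "SELECT", (int) (System.currentTimeMillis() / 1000L),
            "admin", PrincipalType.USER, true);

        // Serialize and deserialize through the compact protocol.
        TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
        byte[] bytes = serializer.serialize(info);

        PrivilegeGrantInfo copy = new PrivilegeGrantInfo();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

        // equals() compares field by field, honoring the isset flags.
        System.out.println(info.equals(copy) + " " + copy);
      }
    }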


[34/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java
new file mode 100644
index 0000000..611bf6f
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CreationMetadata.java
@@ -0,0 +1,851 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CreationMetadata implements org.apache.thrift.TBase<CreationMetadata, CreationMetadata._Fields>, java.io.Serializable, Cloneable, Comparable<CreationMetadata> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CreationMetadata");
+
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField TABLES_USED_FIELD_DESC = new org.apache.thrift.protocol.TField("tablesUsed", org.apache.thrift.protocol.TType.SET, (short)4);
+  private static final org.apache.thrift.protocol.TField VALID_TXN_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validTxnList", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
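+    // The protocol's getScheme() picks the entry used for I/O: TTupleProtocol selects the TupleScheme, all other protocols the StandardScheme.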
+    schemes.put(StandardScheme.class, new CreationMetadataStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CreationMetadataTupleSchemeFactory());
+  }
+
+  private String catName; // required
+  private String dbName; // required
+  private String tblName; // required
+  private Set<String> tablesUsed; // required
+  private String validTxnList; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CAT_NAME((short)1, "catName"),
+    DB_NAME((short)2, "dbName"),
+    TBL_NAME((short)3, "tblName"),
+    TABLES_USED((short)4, "tablesUsed"),
+    VALID_TXN_LIST((short)5, "validTxnList");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CAT_NAME
+          return CAT_NAME;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // TBL_NAME
+          return TBL_NAME;
+        case 4: // TABLES_USED
+          return TABLES_USED;
+        case 5: // VALID_TXN_LIST
+          return VALID_TXN_LIST;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.VALID_TXN_LIST};
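+  // validTxnList is the only optional field; the required fields are all object types tracked via null, so no isset bitfield is generated here.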
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLES_USED, new org.apache.thrift.meta_data.FieldMetaData("tablesUsed", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.SetMetaData(org.apache.thrift.protocol.TType.SET, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.VALID_TXN_LIST, new org.apache.thrift.meta_data.FieldMetaData("validTxnList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CreationMetadata.class, metaDataMap);
+  }
+
+  public CreationMetadata() {
+  }
+
+  public CreationMetadata(
+    String catName,
+    String dbName,
+    String tblName,
+    Set<String> tablesUsed)
+  {
+    this();
+    this.catName = catName;
+    this.dbName = dbName;
+    this.tblName = tblName;
+    this.tablesUsed = tablesUsed;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CreationMetadata(CreationMetadata other) {
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTblName()) {
+      this.tblName = other.tblName;
+    }
+    if (other.isSetTablesUsed()) {
+      Set<String> __this__tablesUsed = new HashSet<String>(other.tablesUsed);
+      this.tablesUsed = __this__tablesUsed;
+    }
+    if (other.isSetValidTxnList()) {
+      this.validTxnList = other.validTxnList;
+    }
+  }
+
+  public CreationMetadata deepCopy() {
+    return new CreationMetadata(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catName = null;
+    this.dbName = null;
+    this.tblName = null;
+    this.tablesUsed = null;
+    this.validTxnList = null;
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTblName() {
+    return this.tblName;
+  }
+
+  public void setTblName(String tblName) {
+    this.tblName = tblName;
+  }
+
+  public void unsetTblName() {
+    this.tblName = null;
+  }
+
+  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblName() {
+    return this.tblName != null;
+  }
+
+  public void setTblNameIsSet(boolean value) {
+    if (!value) {
+      this.tblName = null;
+    }
+  }
+
+  public int getTablesUsedSize() {
+    return (this.tablesUsed == null) ? 0 : this.tablesUsed.size();
+  }
+
+  public java.util.Iterator<String> getTablesUsedIterator() {
+    return (this.tablesUsed == null) ? null : this.tablesUsed.iterator();
+  }
+
+  public void addToTablesUsed(String elem) {
+    if (this.tablesUsed == null) {
+      this.tablesUsed = new HashSet<String>();
+    }
+    this.tablesUsed.add(elem);
+  }
+
+  public Set<String> getTablesUsed() {
+    return this.tablesUsed;
+  }
+
+  public void setTablesUsed(Set<String> tablesUsed) {
+    this.tablesUsed = tablesUsed;
+  }
+
+  public void unsetTablesUsed() {
+    this.tablesUsed = null;
+  }
+
+  /** Returns true if field tablesUsed is set (has been assigned a value) and false otherwise */
+  public boolean isSetTablesUsed() {
+    return this.tablesUsed != null;
+  }
+
+  public void setTablesUsedIsSet(boolean value) {
+    if (!value) {
+      this.tablesUsed = null;
+    }
+  }
+
+  public String getValidTxnList() {
+    return this.validTxnList;
+  }
+
+  public void setValidTxnList(String validTxnList) {
+    this.validTxnList = validTxnList;
+  }
+
+  public void unsetValidTxnList() {
+    this.validTxnList = null;
+  }
+
+  /** Returns true if field validTxnList is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidTxnList() {
+    return this.validTxnList != null;
+  }
+
+  public void setValidTxnListIsSet(boolean value) {
+    if (!value) {
+      this.validTxnList = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTblName();
+      } else {
+        setTblName((String)value);
+      }
+      break;
+
+    case TABLES_USED:
+      if (value == null) {
+        unsetTablesUsed();
+      } else {
+        setTablesUsed((Set<String>)value);
+      }
+      break;
+
+    case VALID_TXN_LIST:
+      if (value == null) {
+        unsetValidTxnList();
+      } else {
+        setValidTxnList((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CAT_NAME:
+      return getCatName();
+
+    case DB_NAME:
+      return getDbName();
+
+    case TBL_NAME:
+      return getTblName();
+
+    case TABLES_USED:
+      return getTablesUsed();
+
+    case VALID_TXN_LIST:
+      return getValidTxnList();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CAT_NAME:
+      return isSetCatName();
+    case DB_NAME:
+      return isSetDbName();
+    case TBL_NAME:
+      return isSetTblName();
+    case TABLES_USED:
+      return isSetTablesUsed();
+    case VALID_TXN_LIST:
+      return isSetValidTxnList();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CreationMetadata)
+      return this.equals((CreationMetadata)that);
+    return false;
+  }
+
+  public boolean equals(CreationMetadata that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tblName = true && this.isSetTblName();
+    boolean that_present_tblName = true && that.isSetTblName();
+    if (this_present_tblName || that_present_tblName) {
+      if (!(this_present_tblName && that_present_tblName))
+        return false;
+      if (!this.tblName.equals(that.tblName))
+        return false;
+    }
+
+    boolean this_present_tablesUsed = true && this.isSetTablesUsed();
+    boolean that_present_tablesUsed = true && that.isSetTablesUsed();
+    if (this_present_tablesUsed || that_present_tablesUsed) {
+      if (!(this_present_tablesUsed && that_present_tablesUsed))
+        return false;
+      if (!this.tablesUsed.equals(that.tablesUsed))
+        return false;
+    }
+
+    boolean this_present_validTxnList = true && this.isSetValidTxnList();
+    boolean that_present_validTxnList = true && that.isSetValidTxnList();
+    if (this_present_validTxnList || that_present_validTxnList) {
+      if (!(this_present_validTxnList && that_present_validTxnList))
+        return false;
+      if (!this.validTxnList.equals(that.validTxnList))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tblName = true && (isSetTblName());
+    list.add(present_tblName);
+    if (present_tblName)
+      list.add(tblName);
+
+    boolean present_tablesUsed = true && (isSetTablesUsed());
+    list.add(present_tablesUsed);
+    if (present_tablesUsed)
+      list.add(tablesUsed);
+
+    boolean present_validTxnList = true && (isSetValidTxnList());
+    list.add(present_validTxnList);
+    if (present_validTxnList)
+      list.add(validTxnList);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CreationMetadata other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTablesUsed()).compareTo(other.isSetTablesUsed());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTablesUsed()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablesUsed, other.tablesUsed);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidTxnList()).compareTo(other.isSetValidTxnList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidTxnList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validTxnList, other.validTxnList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CreationMetadata(");
+    boolean first = true;
+
+    sb.append("catName:");
+    if (this.catName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tblName:");
+    if (this.tblName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tblName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tablesUsed:");
+    if (this.tablesUsed == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tablesUsed);
+    }
+    first = false;
+    if (isSetValidTxnList()) {
+      if (!first) sb.append(", ");
+      sb.append("validTxnList:");
+      if (this.validTxnList == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.validTxnList);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetCatName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTblName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTablesUsed()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tablesUsed' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CreationMetadataStandardSchemeFactory implements SchemeFactory {
+    public CreationMetadataStandardScheme getScheme() {
+      return new CreationMetadataStandardScheme();
+    }
+  }
+
+  private static class CreationMetadataStandardScheme extends StandardScheme<CreationMetadata> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CreationMetadata struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tblName = iprot.readString();
+              struct.setTblNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // TABLES_USED
+            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
+              {
+                org.apache.thrift.protocol.TSet _set716 = iprot.readSetBegin();
+                struct.tablesUsed = new HashSet<String>(2*_set716.size);
+                String _elem717;
+                for (int _i718 = 0; _i718 < _set716.size; ++_i718)
+                {
+                  _elem717 = iprot.readString();
+                  struct.tablesUsed.add(_elem717);
+                }
+                iprot.readSetEnd();
+              }
+              struct.setTablesUsedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // VALID_TXN_LIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.validTxnList = iprot.readString();
+              struct.setValidTxnListIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CreationMetadata struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catName != null) {
+        oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+        oprot.writeString(struct.catName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblName != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tblName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tablesUsed != null) {
+        oprot.writeFieldBegin(TABLES_USED_FIELD_DESC);
+        {
+          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, struct.tablesUsed.size()));
+          for (String _iter719 : struct.tablesUsed)
+          {
+            oprot.writeString(_iter719);
+          }
+          oprot.writeSetEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.validTxnList != null) {
+        if (struct.isSetValidTxnList()) {
+          oprot.writeFieldBegin(VALID_TXN_LIST_FIELD_DESC);
+          oprot.writeString(struct.validTxnList);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CreationMetadataTupleSchemeFactory implements SchemeFactory {
+    public CreationMetadataTupleScheme getScheme() {
+      return new CreationMetadataTupleScheme();
+    }
+  }
+
+  private static class CreationMetadataTupleScheme extends TupleScheme<CreationMetadata> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CreationMetadata struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.catName);
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tblName);
+      {
+        oprot.writeI32(struct.tablesUsed.size());
+        for (String _iter720 : struct.tablesUsed)
+        {
+          oprot.writeString(_iter720);
+        }
+      }
+      BitSet optionals = new BitSet();
+      if (struct.isSetValidTxnList()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetValidTxnList()) {
+        oprot.writeString(struct.validTxnList);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CreationMetadata struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.catName = iprot.readString();
+      struct.setCatNameIsSet(true);
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tblName = iprot.readString();
+      struct.setTblNameIsSet(true);
+      {
+        org.apache.thrift.protocol.TSet _set721 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.tablesUsed = new HashSet<String>(2*_set721.size);
+        String _elem722;
+        for (int _i723 = 0; _i723 < _set721.size; ++_i723)
+        {
+          _elem722 = iprot.readString();
+          struct.tablesUsed.add(_elem722);
+        }
+      }
+      struct.setTablesUsedIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.validTxnList = iprot.readString();
+        struct.setValidTxnListIsSet(true);
+      }
+    }
+  }
+
+}
+
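
As a quick illustration of the required-field checks in validate() above, a sketch along these lines should work, again assuming the generated classes and the Thrift runtime are on the classpath; the demo class name and the catalog/database/table names are placeholders.

    import java.util.Collections;
    import org.apache.thrift.TException;
    import org.apache.hadoop.hive.metastore.api.CreationMetadata;

    public class CreationMetadataValidateDemo {
      public static void main(String[] args) throws TException {
        // All four required fields set: validate() passes.
        CreationMetadata ok = new CreationMetadata(
            "hive", "default", "mv1", Collections.singleton("default.src"));
        ok.validate();

        // A required field left unset makes validate() throw a TProtocolException.
        CreationMetadata missing = new CreationMetadata();
        missing.setDbName("default");
        try {
          missing.validate();
        } catch (TException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }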

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
new file mode 100644
index 0000000..536829f
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CurrentNotificationEventId.java
@@ -0,0 +1,387 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CurrentNotificationEventId implements org.apache.thrift.TBase<CurrentNotificationEventId, CurrentNotificationEventId._Fields>, java.io.Serializable, Cloneable, Comparable<CurrentNotificationEventId> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CurrentNotificationEventId");
+
+  private static final org.apache.thrift.protocol.TField EVENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("eventId", org.apache.thrift.protocol.TType.I64, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CurrentNotificationEventIdStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CurrentNotificationEventIdTupleSchemeFactory());
+  }
+
+  private long eventId; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    EVENT_ID((short)1, "eventId");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // EVENT_ID
+          return EVENT_ID;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
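+  // eventId is a primitive long, so its "set" state lives in the bitfield below rather than in a null check.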
+  private static final int __EVENTID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.EVENT_ID, new org.apache.thrift.meta_data.FieldMetaData("eventId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CurrentNotificationEventId.class, metaDataMap);
+  }
+
+  public CurrentNotificationEventId() {
+  }
+
+  public CurrentNotificationEventId(
+    long eventId)
+  {
+    this();
+    this.eventId = eventId;
+    setEventIdIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CurrentNotificationEventId(CurrentNotificationEventId other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.eventId = other.eventId;
+  }
+
+  public CurrentNotificationEventId deepCopy() {
+    return new CurrentNotificationEventId(this);
+  }
+
+  @Override
+  public void clear() {
+    setEventIdIsSet(false);
+    this.eventId = 0;
+  }
+
+  public long getEventId() {
+    return this.eventId;
+  }
+
+  public void setEventId(long eventId) {
+    this.eventId = eventId;
+    setEventIdIsSet(true);
+  }
+
+  public void unsetEventId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __EVENTID_ISSET_ID);
+  }
+
+  /** Returns true if field eventId is set (has been assigned a value) and false otherwise */
+  public boolean isSetEventId() {
+    return EncodingUtils.testBit(__isset_bitfield, __EVENTID_ISSET_ID);
+  }
+
+  public void setEventIdIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __EVENTID_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case EVENT_ID:
+      if (value == null) {
+        unsetEventId();
+      } else {
+        setEventId((Long)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case EVENT_ID:
+      return getEventId();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case EVENT_ID:
+      return isSetEventId();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CurrentNotificationEventId)
+      return this.equals((CurrentNotificationEventId)that);
+    return false;
+  }
+
+  public boolean equals(CurrentNotificationEventId that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_eventId = true;
+    boolean that_present_eventId = true;
+    if (this_present_eventId || that_present_eventId) {
+      if (!(this_present_eventId && that_present_eventId))
+        return false;
+      if (this.eventId != that.eventId)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_eventId = true;
+    list.add(present_eventId);
+    if (present_eventId)
+      list.add(eventId);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CurrentNotificationEventId other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetEventId()).compareTo(other.isSetEventId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEventId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.eventId, other.eventId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CurrentNotificationEventId(");
+    boolean first = true;
+
+    sb.append("eventId:");
+    sb.append(this.eventId);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetEventId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'eventId' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java deserialization does not invoke the default constructor, so the isset bitfield must be reset explicitly before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CurrentNotificationEventIdStandardSchemeFactory implements SchemeFactory {
+    public CurrentNotificationEventIdStandardScheme getScheme() {
+      return new CurrentNotificationEventIdStandardScheme();
+    }
+  }
+
+  private static class CurrentNotificationEventIdStandardScheme extends StandardScheme<CurrentNotificationEventId> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CurrentNotificationEventId struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // EVENT_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.eventId = iprot.readI64();
+              struct.setEventIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CurrentNotificationEventId struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(EVENT_ID_FIELD_DESC);
+      oprot.writeI64(struct.eventId);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CurrentNotificationEventIdTupleSchemeFactory implements SchemeFactory {
+    public CurrentNotificationEventIdTupleScheme getScheme() {
+      return new CurrentNotificationEventIdTupleScheme();
+    }
+  }
+
+  private static class CurrentNotificationEventIdTupleScheme extends TupleScheme<CurrentNotificationEventId> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CurrentNotificationEventId struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.eventId);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CurrentNotificationEventId struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.eventId = iprot.readI64();
+      struct.setEventIdIsSet(true);
+    }
+  }
+
+}
+
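
The struct above is self-contained enough to exercise with a quick round trip
through a protocol. A minimal sketch, assuming libthrift 0.9.3 on the
classpath (the generator version stamped in the file header); the class name
RoundTripCheck is illustrative and not part of the patch:

import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TMemoryBuffer;
import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;

public class RoundTripCheck {
  public static void main(String[] args) throws Exception {
    // eventId is the struct's only field, and it is required.
    CurrentNotificationEventId out = new CurrentNotificationEventId(42L);

    // TMemoryBuffer serves as both the write and the read transport here.
    TMemoryBuffer buffer = new TMemoryBuffer(64);
    out.write(new TCompactProtocol(buffer));

    CurrentNotificationEventId in = new CurrentNotificationEventId();
    in.read(new TCompactProtocol(buffer));   // validate() runs after the read
    assert in.getEventId() == 42L;           // run with -ea to enable asserts
  }
}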

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DataOperationType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DataOperationType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DataOperationType.java
new file mode 100644
index 0000000..15a6e9a
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DataOperationType.java
@@ -0,0 +1,57 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum DataOperationType implements org.apache.thrift.TEnum {
+  SELECT(1),
+  INSERT(2),
+  UPDATE(3),
+  DELETE(4),
+  UNSET(5),
+  NO_TXN(6);
+
+  private final int value;
+
+  private DataOperationType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static DataOperationType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return SELECT;
+      case 2:
+        return INSERT;
+      case 3:
+        return UPDATE;
+      case 4:
+        return DELETE;
+      case 5:
+        return UNSET;
+      case 6:
+        return NO_TXN;
+      default:
+        return null;
+    }
+  }
+}
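
Because findByValue() returns null for integers outside the IDL-defined range
rather than throwing, callers decoding raw i32 values off the wire should
null-check the result. A small illustration; the class name FindByValueDemo
is made up for the example:

import org.apache.hadoop.hive.metastore.api.DataOperationType;

public class FindByValueDemo {
  public static void main(String[] args) {
    System.out.println(DataOperationType.findByValue(3));   // UPDATE
    System.out.println(DataOperationType.findByValue(99));  // null: not in the IDL
  }
}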

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
new file mode 100644
index 0000000..9cde9b8
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java
@@ -0,0 +1,1201 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Database implements org.apache.thrift.TBase<Database, Database._Fields>, java.io.Serializable, Cloneable, Comparable<Database> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Database");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField LOCATION_URI_FIELD_DESC = new org.apache.thrift.protocol.TField("locationUri", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)4);
+  private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)5);
+  private static final org.apache.thrift.protocol.TField OWNER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerName", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)7);
+  private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)8);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DatabaseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DatabaseTupleSchemeFactory());
+  }
+
+  private String name; // required
+  private String description; // required
+  private String locationUri; // required
+  private Map<String,String> parameters; // required
+  private PrincipalPrivilegeSet privileges; // optional
+  private String ownerName; // optional
+  private PrincipalType ownerType; // optional
+  private String catalogName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NAME((short)1, "name"),
+    DESCRIPTION((short)2, "description"),
+    LOCATION_URI((short)3, "locationUri"),
+    PARAMETERS((short)4, "parameters"),
+    PRIVILEGES((short)5, "privileges"),
+    OWNER_NAME((short)6, "ownerName"),
+    /**
+     * 
+     * @see PrincipalType
+     */
+    OWNER_TYPE((short)7, "ownerType"),
+    CATALOG_NAME((short)8, "catalogName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NAME
+          return NAME;
+        case 2: // DESCRIPTION
+          return DESCRIPTION;
+        case 3: // LOCATION_URI
+          return LOCATION_URI;
+        case 4: // PARAMETERS
+          return PARAMETERS;
+        case 5: // PRIVILEGES
+          return PRIVILEGES;
+        case 6: // OWNER_NAME
+          return OWNER_NAME;
+        case 7: // OWNER_TYPE
+          return OWNER_TYPE;
+        case 8: // CATALOG_NAME
+          return CATALOG_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.OWNER_NAME,_Fields.OWNER_TYPE,_Fields.CATALOG_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.LOCATION_URI, new org.apache.thrift.meta_data.FieldMetaData("locationUri", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
+    tmpMap.put(_Fields.OWNER_NAME, new org.apache.thrift.meta_data.FieldMetaData("ownerName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.OWNER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("ownerType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+    tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Database.class, metaDataMap);
+  }
+
+  public Database() {
+  }
+
+  public Database(
+    String name,
+    String description,
+    String locationUri,
+    Map<String,String> parameters)
+  {
+    this();
+    this.name = name;
+    this.description = description;
+    this.locationUri = locationUri;
+    this.parameters = parameters;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public Database(Database other) {
+    if (other.isSetName()) {
+      this.name = other.name;
+    }
+    if (other.isSetDescription()) {
+      this.description = other.description;
+    }
+    if (other.isSetLocationUri()) {
+      this.locationUri = other.locationUri;
+    }
+    if (other.isSetParameters()) {
+      Map<String,String> __this__parameters = new HashMap<String,String>(other.parameters);
+      this.parameters = __this__parameters;
+    }
+    if (other.isSetPrivileges()) {
+      this.privileges = new PrincipalPrivilegeSet(other.privileges);
+    }
+    if (other.isSetOwnerName()) {
+      this.ownerName = other.ownerName;
+    }
+    if (other.isSetOwnerType()) {
+      this.ownerType = other.ownerType;
+    }
+    if (other.isSetCatalogName()) {
+      this.catalogName = other.catalogName;
+    }
+  }
+
+  public Database deepCopy() {
+    return new Database(this);
+  }
+
+  @Override
+  public void clear() {
+    this.name = null;
+    this.description = null;
+    this.locationUri = null;
+    this.parameters = null;
+    this.privileges = null;
+    this.ownerName = null;
+    this.ownerType = null;
+    this.catalogName = null;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public String getDescription() {
+    return this.description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public void unsetDescription() {
+    this.description = null;
+  }
+
+  /** Returns true if field description is set (has been assigned a value) and false otherwise */
+  public boolean isSetDescription() {
+    return this.description != null;
+  }
+
+  public void setDescriptionIsSet(boolean value) {
+    if (!value) {
+      this.description = null;
+    }
+  }
+
+  public String getLocationUri() {
+    return this.locationUri;
+  }
+
+  public void setLocationUri(String locationUri) {
+    this.locationUri = locationUri;
+  }
+
+  public void unsetLocationUri() {
+    this.locationUri = null;
+  }
+
+  /** Returns true if field locationUri is set (has been assigned a value) and false otherwise */
+  public boolean isSetLocationUri() {
+    return this.locationUri != null;
+  }
+
+  public void setLocationUriIsSet(boolean value) {
+    if (!value) {
+      this.locationUri = null;
+    }
+  }
+
+  public int getParametersSize() {
+    return (this.parameters == null) ? 0 : this.parameters.size();
+  }
+
+  public void putToParameters(String key, String val) {
+    if (this.parameters == null) {
+      this.parameters = new HashMap<String,String>();
+    }
+    this.parameters.put(key, val);
+  }
+
+  public Map<String,String> getParameters() {
+    return this.parameters;
+  }
+
+  public void setParameters(Map<String,String> parameters) {
+    this.parameters = parameters;
+  }
+
+  public void unsetParameters() {
+    this.parameters = null;
+  }
+
+  /** Returns true if field parameters is set (has been assigned a value) and false otherwise */
+  public boolean isSetParameters() {
+    return this.parameters != null;
+  }
+
+  public void setParametersIsSet(boolean value) {
+    if (!value) {
+      this.parameters = null;
+    }
+  }
+
+  public PrincipalPrivilegeSet getPrivileges() {
+    return this.privileges;
+  }
+
+  public void setPrivileges(PrincipalPrivilegeSet privileges) {
+    this.privileges = privileges;
+  }
+
+  public void unsetPrivileges() {
+    this.privileges = null;
+  }
+
+  /** Returns true if field privileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrivileges() {
+    return this.privileges != null;
+  }
+
+  public void setPrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.privileges = null;
+    }
+  }
+
+  public String getOwnerName() {
+    return this.ownerName;
+  }
+
+  public void setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+  }
+
+  public void unsetOwnerName() {
+    this.ownerName = null;
+  }
+
+  /** Returns true if field ownerName is set (has been assigned a value) and false otherwise */
+  public boolean isSetOwnerName() {
+    return this.ownerName != null;
+  }
+
+  public void setOwnerNameIsSet(boolean value) {
+    if (!value) {
+      this.ownerName = null;
+    }
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public PrincipalType getOwnerType() {
+    return this.ownerType;
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public void setOwnerType(PrincipalType ownerType) {
+    this.ownerType = ownerType;
+  }
+
+  public void unsetOwnerType() {
+    this.ownerType = null;
+  }
+
+  /** Returns true if field ownerType is set (has been assigned a value) and false otherwise */
+  public boolean isSetOwnerType() {
+    return this.ownerType != null;
+  }
+
+  public void setOwnerTypeIsSet(boolean value) {
+    if (!value) {
+      this.ownerType = null;
+    }
+  }
+
+  public String getCatalogName() {
+    return this.catalogName;
+  }
+
+  public void setCatalogName(String catalogName) {
+    this.catalogName = catalogName;
+  }
+
+  public void unsetCatalogName() {
+    this.catalogName = null;
+  }
+
+  /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatalogName() {
+    return this.catalogName != null;
+  }
+
+  public void setCatalogNameIsSet(boolean value) {
+    if (!value) {
+      this.catalogName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((String)value);
+      }
+      break;
+
+    case DESCRIPTION:
+      if (value == null) {
+        unsetDescription();
+      } else {
+        setDescription((String)value);
+      }
+      break;
+
+    case LOCATION_URI:
+      if (value == null) {
+        unsetLocationUri();
+      } else {
+        setLocationUri((String)value);
+      }
+      break;
+
+    case PARAMETERS:
+      if (value == null) {
+        unsetParameters();
+      } else {
+        setParameters((Map<String,String>)value);
+      }
+      break;
+
+    case PRIVILEGES:
+      if (value == null) {
+        unsetPrivileges();
+      } else {
+        setPrivileges((PrincipalPrivilegeSet)value);
+      }
+      break;
+
+    case OWNER_NAME:
+      if (value == null) {
+        unsetOwnerName();
+      } else {
+        setOwnerName((String)value);
+      }
+      break;
+
+    case OWNER_TYPE:
+      if (value == null) {
+        unsetOwnerType();
+      } else {
+        setOwnerType((PrincipalType)value);
+      }
+      break;
+
+    case CATALOG_NAME:
+      if (value == null) {
+        unsetCatalogName();
+      } else {
+        setCatalogName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NAME:
+      return getName();
+
+    case DESCRIPTION:
+      return getDescription();
+
+    case LOCATION_URI:
+      return getLocationUri();
+
+    case PARAMETERS:
+      return getParameters();
+
+    case PRIVILEGES:
+      return getPrivileges();
+
+    case OWNER_NAME:
+      return getOwnerName();
+
+    case OWNER_TYPE:
+      return getOwnerType();
+
+    case CATALOG_NAME:
+      return getCatalogName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NAME:
+      return isSetName();
+    case DESCRIPTION:
+      return isSetDescription();
+    case LOCATION_URI:
+      return isSetLocationUri();
+    case PARAMETERS:
+      return isSetParameters();
+    case PRIVILEGES:
+      return isSetPrivileges();
+    case OWNER_NAME:
+      return isSetOwnerName();
+    case OWNER_TYPE:
+      return isSetOwnerType();
+    case CATALOG_NAME:
+      return isSetCatalogName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof Database)
+      return this.equals((Database)that);
+    return false;
+  }
+
+  public boolean equals(Database that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    boolean this_present_description = true && this.isSetDescription();
+    boolean that_present_description = true && that.isSetDescription();
+    if (this_present_description || that_present_description) {
+      if (!(this_present_description && that_present_description))
+        return false;
+      if (!this.description.equals(that.description))
+        return false;
+    }
+
+    boolean this_present_locationUri = true && this.isSetLocationUri();
+    boolean that_present_locationUri = true && that.isSetLocationUri();
+    if (this_present_locationUri || that_present_locationUri) {
+      if (!(this_present_locationUri && that_present_locationUri))
+        return false;
+      if (!this.locationUri.equals(that.locationUri))
+        return false;
+    }
+
+    boolean this_present_parameters = true && this.isSetParameters();
+    boolean that_present_parameters = true && that.isSetParameters();
+    if (this_present_parameters || that_present_parameters) {
+      if (!(this_present_parameters && that_present_parameters))
+        return false;
+      if (!this.parameters.equals(that.parameters))
+        return false;
+    }
+
+    boolean this_present_privileges = true && this.isSetPrivileges();
+    boolean that_present_privileges = true && that.isSetPrivileges();
+    if (this_present_privileges || that_present_privileges) {
+      if (!(this_present_privileges && that_present_privileges))
+        return false;
+      if (!this.privileges.equals(that.privileges))
+        return false;
+    }
+
+    boolean this_present_ownerName = true && this.isSetOwnerName();
+    boolean that_present_ownerName = true && that.isSetOwnerName();
+    if (this_present_ownerName || that_present_ownerName) {
+      if (!(this_present_ownerName && that_present_ownerName))
+        return false;
+      if (!this.ownerName.equals(that.ownerName))
+        return false;
+    }
+
+    boolean this_present_ownerType = true && this.isSetOwnerType();
+    boolean that_present_ownerType = true && that.isSetOwnerType();
+    if (this_present_ownerType || that_present_ownerType) {
+      if (!(this_present_ownerType && that_present_ownerType))
+        return false;
+      if (!this.ownerType.equals(that.ownerType))
+        return false;
+    }
+
+    boolean this_present_catalogName = true && this.isSetCatalogName();
+    boolean that_present_catalogName = true && that.isSetCatalogName();
+    if (this_present_catalogName || that_present_catalogName) {
+      if (!(this_present_catalogName && that_present_catalogName))
+        return false;
+      if (!this.catalogName.equals(that.catalogName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    boolean present_description = true && (isSetDescription());
+    list.add(present_description);
+    if (present_description)
+      list.add(description);
+
+    boolean present_locationUri = true && (isSetLocationUri());
+    list.add(present_locationUri);
+    if (present_locationUri)
+      list.add(locationUri);
+
+    boolean present_parameters = true && (isSetParameters());
+    list.add(present_parameters);
+    if (present_parameters)
+      list.add(parameters);
+
+    boolean present_privileges = true && (isSetPrivileges());
+    list.add(present_privileges);
+    if (present_privileges)
+      list.add(privileges);
+
+    boolean present_ownerName = true && (isSetOwnerName());
+    list.add(present_ownerName);
+    if (present_ownerName)
+      list.add(ownerName);
+
+    boolean present_ownerType = true && (isSetOwnerType());
+    list.add(present_ownerType);
+    if (present_ownerType)
+      list.add(ownerType.getValue());
+
+    boolean present_catalogName = true && (isSetCatalogName());
+    list.add(present_catalogName);
+    if (present_catalogName)
+      list.add(catalogName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(Database other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDescription()).compareTo(other.isSetDescription());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDescription()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.description, other.description);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetLocationUri()).compareTo(other.isSetLocationUri());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLocationUri()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.locationUri, other.locationUri);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetParameters()).compareTo(other.isSetParameters());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetParameters()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parameters, other.parameters);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrivileges()).compareTo(other.isSetPrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privileges, other.privileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOwnerName()).compareTo(other.isSetOwnerName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOwnerName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ownerName, other.ownerName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOwnerType()).compareTo(other.isSetOwnerType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOwnerType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ownerType, other.ownerType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatalogName()).compareTo(other.isSetCatalogName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatalogName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, other.catalogName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Database(");
+    boolean first = true;
+
+    sb.append("name:");
+    if (this.name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("description:");
+    if (this.description == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.description);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("locationUri:");
+    if (this.locationUri == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.locationUri);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("parameters:");
+    if (this.parameters == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.parameters);
+    }
+    first = false;
+    if (isSetPrivileges()) {
+      if (!first) sb.append(", ");
+      sb.append("privileges:");
+      if (this.privileges == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.privileges);
+      }
+      first = false;
+    }
+    if (isSetOwnerName()) {
+      if (!first) sb.append(", ");
+      sb.append("ownerName:");
+      if (this.ownerName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.ownerName);
+      }
+      first = false;
+    }
+    if (isSetOwnerType()) {
+      if (!first) sb.append(", ");
+      sb.append("ownerType:");
+      if (this.ownerType == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.ownerType);
+      }
+      first = false;
+    }
+    if (isSetCatalogName()) {
+      if (!first) sb.append(", ");
+      sb.append("catalogName:");
+      if (this.catalogName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catalogName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (privileges != null) {
+      privileges.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DatabaseStandardSchemeFactory implements SchemeFactory {
+    public DatabaseStandardScheme getScheme() {
+      return new DatabaseStandardScheme();
+    }
+  }
+
+  private static class DatabaseStandardScheme extends StandardScheme<Database> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Database struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = iprot.readString();
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DESCRIPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.description = iprot.readString();
+              struct.setDescriptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // LOCATION_URI
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.locationUri = iprot.readString();
+              struct.setLocationUriIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // PARAMETERS
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map102 = iprot.readMapBegin();
+                struct.parameters = new HashMap<String,String>(2*_map102.size);
+                String _key103;
+                String _val104;
+                for (int _i105 = 0; _i105 < _map102.size; ++_i105)
+                {
+                  _key103 = iprot.readString();
+                  _val104 = iprot.readString();
+                  struct.parameters.put(_key103, _val104);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setParametersIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.privileges = new PrincipalPrivilegeSet();
+              struct.privileges.read(iprot);
+              struct.setPrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // OWNER_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.ownerName = iprot.readString();
+              struct.setOwnerNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // OWNER_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+              struct.setOwnerTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // CATALOG_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catalogName = iprot.readString();
+              struct.setCatalogNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Database struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        oprot.writeString(struct.name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.description != null) {
+        oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC);
+        oprot.writeString(struct.description);
+        oprot.writeFieldEnd();
+      }
+      if (struct.locationUri != null) {
+        oprot.writeFieldBegin(LOCATION_URI_FIELD_DESC);
+        oprot.writeString(struct.locationUri);
+        oprot.writeFieldEnd();
+      }
+      if (struct.parameters != null) {
+        oprot.writeFieldBegin(PARAMETERS_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size()));
+          for (Map.Entry<String, String> _iter106 : struct.parameters.entrySet())
+          {
+            oprot.writeString(_iter106.getKey());
+            oprot.writeString(_iter106.getValue());
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.privileges != null) {
+        if (struct.isSetPrivileges()) {
+          oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC);
+          struct.privileges.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.ownerName != null) {
+        if (struct.isSetOwnerName()) {
+          oprot.writeFieldBegin(OWNER_NAME_FIELD_DESC);
+          oprot.writeString(struct.ownerName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.ownerType != null) {
+        if (struct.isSetOwnerType()) {
+          oprot.writeFieldBegin(OWNER_TYPE_FIELD_DESC);
+          oprot.writeI32(struct.ownerType.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.catalogName != null) {
+        if (struct.isSetCatalogName()) {
+          oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC);
+          oprot.writeString(struct.catalogName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DatabaseTupleSchemeFactory implements SchemeFactory {
+    public DatabaseTupleScheme getScheme() {
+      return new DatabaseTupleScheme();
+    }
+  }
+
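+  // Unlike the field-tagged StandardScheme above, the TupleScheme below is
+  // positional: a leading 8-bit BitSet records which fields follow, so the
+  // reader and the writer must agree on the same IDL definition.
+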
+  private static class DatabaseTupleScheme extends TupleScheme<Database> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, Database struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetDescription()) {
+        optionals.set(1);
+      }
+      if (struct.isSetLocationUri()) {
+        optionals.set(2);
+      }
+      if (struct.isSetParameters()) {
+        optionals.set(3);
+      }
+      if (struct.isSetPrivileges()) {
+        optionals.set(4);
+      }
+      if (struct.isSetOwnerName()) {
+        optionals.set(5);
+      }
+      if (struct.isSetOwnerType()) {
+        optionals.set(6);
+      }
+      if (struct.isSetCatalogName()) {
+        optionals.set(7);
+      }
+      oprot.writeBitSet(optionals, 8);
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+      if (struct.isSetDescription()) {
+        oprot.writeString(struct.description);
+      }
+      if (struct.isSetLocationUri()) {
+        oprot.writeString(struct.locationUri);
+      }
+      if (struct.isSetParameters()) {
+        {
+          oprot.writeI32(struct.parameters.size());
+          for (Map.Entry<String, String> _iter107 : struct.parameters.entrySet())
+          {
+            oprot.writeString(_iter107.getKey());
+            oprot.writeString(_iter107.getValue());
+          }
+        }
+      }
+      if (struct.isSetPrivileges()) {
+        struct.privileges.write(oprot);
+      }
+      if (struct.isSetOwnerName()) {
+        oprot.writeString(struct.ownerName);
+      }
+      if (struct.isSetOwnerType()) {
+        oprot.writeI32(struct.ownerType.getValue());
+      }
+      if (struct.isSetCatalogName()) {
+        oprot.writeString(struct.catalogName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, Database struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(8);
+      if (incoming.get(0)) {
+        struct.name = iprot.readString();
+        struct.setNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.description = iprot.readString();
+        struct.setDescriptionIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.locationUri = iprot.readString();
+        struct.setLocationUriIsSet(true);
+      }
+      if (incoming.get(3)) {
+        {
+          org.apache.thrift.protocol.TMap _map108 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.parameters = new HashMap<String,String>(2*_map108.size);
+          String _key109;
+          String _val110;
+          for (int _i111 = 0; _i111 < _map108.size; ++_i111)
+          {
+            _key109 = iprot.readString();
+            _val110 = iprot.readString();
+            struct.parameters.put(_key109, _val110);
+          }
+        }
+        struct.setParametersIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.privileges = new PrincipalPrivilegeSet();
+        struct.privileges.read(iprot);
+        struct.setPrivilegesIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.ownerName = iprot.readString();
+        struct.setOwnerNameIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+        struct.setOwnerTypeIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.catalogName = iprot.readString();
+        struct.setCatalogNameIsSet(true);
+      }
+    }
+  }
+
+}
+
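
The four-argument constructor above covers the default-requirement fields;
the optional ones (privileges, ownerName, ownerType, catalogName) are set
separately and tracked by the isSet* methods. A short sketch of that
behavior; DatabaseDemo and the HDFS URI are illustrative values only:

import java.util.HashMap;
import org.apache.hadoop.hive.metastore.api.Database;

public class DatabaseDemo {
  public static void main(String[] args) {
    Database db = new Database("sales", "sales data",
        "hdfs://namenode:8020/warehouse/sales.db",   // example location only
        new HashMap<String, String>());
    System.out.println(db.isSetCatalogName());  // false: optional, never assigned
    db.setCatalogName("hive");
    System.out.println(db.isSetCatalogName());  // true
    System.out.println(db);  // toString() prints optional fields only when set
  }
}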


[85/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
index 0000000,4caec8f..fe6130c
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
@@@ -1,0 -1,490 +1,597 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionsStatsResult implements org.apache.thrift.TBase<PartitionsStatsResult, PartitionsStatsResult._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsStatsResult> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult");
+ 
+   private static final org.apache.thrift.protocol.TField PART_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("partStats", org.apache.thrift.protocol.TType.MAP, (short)1);
++  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new PartitionsStatsResultStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new PartitionsStatsResultTupleSchemeFactory());
+   }
+ 
+   private Map<String,List<ColumnStatisticsObj>> partStats; // required
++  private boolean isStatsCompliant; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
 -    PART_STATS((short)1, "partStats");
++    PART_STATS((short)1, "partStats"),
++    IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // PART_STATS
+           return PART_STATS;
++        case 2: // IS_STATS_COMPLIANT
++          return IS_STATS_COMPLIANT;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
++  private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.PART_STATS, new org.apache.thrift.meta_data.FieldMetaData("partStats", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+             new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+                 new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))));
++    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsResult.class, metaDataMap);
+   }
+ 
+   public PartitionsStatsResult() {
+   }
+ 
+   public PartitionsStatsResult(
+     Map<String,List<ColumnStatisticsObj>> partStats)
+   {
+     this();
+     this.partStats = partStats;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public PartitionsStatsResult(PartitionsStatsResult other) {
++    __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetPartStats()) {
+       Map<String,List<ColumnStatisticsObj>> __this__partStats = new HashMap<String,List<ColumnStatisticsObj>>(other.partStats.size());
+       for (Map.Entry<String, List<ColumnStatisticsObj>> other_element : other.partStats.entrySet()) {
+ 
+         String other_element_key = other_element.getKey();
+         List<ColumnStatisticsObj> other_element_value = other_element.getValue();
+ 
+         String __this__partStats_copy_key = other_element_key;
+ 
+         List<ColumnStatisticsObj> __this__partStats_copy_value = new ArrayList<ColumnStatisticsObj>(other_element_value.size());
+         for (ColumnStatisticsObj other_element_value_element : other_element_value) {
+           __this__partStats_copy_value.add(new ColumnStatisticsObj(other_element_value_element));
+         }
+ 
+         __this__partStats.put(__this__partStats_copy_key, __this__partStats_copy_value);
+       }
+       this.partStats = __this__partStats;
+     }
++    this.isStatsCompliant = other.isStatsCompliant;
+   }
+ 
+   public PartitionsStatsResult deepCopy() {
+     return new PartitionsStatsResult(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.partStats = null;
++    setIsStatsCompliantIsSet(false);
++    this.isStatsCompliant = false;
+   }
+ 
+   public int getPartStatsSize() {
+     return (this.partStats == null) ? 0 : this.partStats.size();
+   }
+ 
+   public void putToPartStats(String key, List<ColumnStatisticsObj> val) {
+     if (this.partStats == null) {
+       this.partStats = new HashMap<String,List<ColumnStatisticsObj>>();
+     }
+     this.partStats.put(key, val);
+   }
+ 
+   public Map<String,List<ColumnStatisticsObj>> getPartStats() {
+     return this.partStats;
+   }
+ 
+   public void setPartStats(Map<String,List<ColumnStatisticsObj>> partStats) {
+     this.partStats = partStats;
+   }
+ 
+   public void unsetPartStats() {
+     this.partStats = null;
+   }
+ 
+   /** Returns true if field partStats is set (has been assigned a value) and false otherwise */
+   public boolean isSetPartStats() {
+     return this.partStats != null;
+   }
+ 
+   public void setPartStatsIsSet(boolean value) {
+     if (!value) {
+       this.partStats = null;
+     }
+   }
+ 
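++  // isStatsCompliant is a primitive boolean, so the accessors below track
++  // presence through a bit in __isset_bitfield rather than by null-checking.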
++  public boolean isIsStatsCompliant() {
++    return this.isStatsCompliant;
++  }
++
++  public void setIsStatsCompliant(boolean isStatsCompliant) {
++    this.isStatsCompliant = isStatsCompliant;
++    setIsStatsCompliantIsSet(true);
++  }
++
++  public void unsetIsStatsCompliant() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
++  public boolean isSetIsStatsCompliant() {
++    return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  public void setIsStatsCompliantIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case PART_STATS:
+       if (value == null) {
+         unsetPartStats();
+       } else {
+         setPartStats((Map<String,List<ColumnStatisticsObj>>)value);
+       }
+       break;
+ 
++    case IS_STATS_COMPLIANT:
++      if (value == null) {
++        unsetIsStatsCompliant();
++      } else {
++        setIsStatsCompliant((Boolean)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case PART_STATS:
+       return getPartStats();
+ 
++    case IS_STATS_COMPLIANT:
++      return isIsStatsCompliant();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case PART_STATS:
+       return isSetPartStats();
++    case IS_STATS_COMPLIANT:
++      return isSetIsStatsCompliant();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof PartitionsStatsResult)
+       return this.equals((PartitionsStatsResult)that);
+     return false;
+   }
+ 
+   public boolean equals(PartitionsStatsResult that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_partStats = true && this.isSetPartStats();
+     boolean that_present_partStats = true && that.isSetPartStats();
+     if (this_present_partStats || that_present_partStats) {
+       if (!(this_present_partStats && that_present_partStats))
+         return false;
+       if (!this.partStats.equals(that.partStats))
+         return false;
+     }
+ 
++    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
++    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
++    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
++      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
++        return false;
++      if (this.isStatsCompliant != that.isStatsCompliant)
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_partStats = true && (isSetPartStats());
+     list.add(present_partStats);
+     if (present_partStats)
+       list.add(partStats);
+ 
++    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
++    list.add(present_isStatsCompliant);
++    if (present_isStatsCompliant)
++      list.add(isStatsCompliant);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(PartitionsStatsResult other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetPartStats()).compareTo(other.isSetPartStats());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetPartStats()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partStats, other.partStats);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIsStatsCompliant()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("PartitionsStatsResult(");
+     boolean first = true;
+ 
+     sb.append("partStats:");
+     if (this.partStats == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.partStats);
+     }
+     first = false;
++    if (isSetIsStatsCompliant()) {
++      if (!first) sb.append(", ");
++      sb.append("isStatsCompliant:");
++      sb.append(this.isStatsCompliant);
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetPartStats()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'partStats' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
++      // Java serialization bypasses the default constructor, so reset the isset bitfield explicitly.
++      __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class PartitionsStatsResultStandardSchemeFactory implements SchemeFactory {
+     public PartitionsStatsResultStandardScheme getScheme() {
+       return new PartitionsStatsResultStandardScheme();
+     }
+   }
+ 
+   private static class PartitionsStatsResultStandardScheme extends StandardScheme<PartitionsStatsResult> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResult struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // PART_STATS
+             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+               {
+                 org.apache.thrift.protocol.TMap _map432 = iprot.readMapBegin();
+                 struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map432.size);
+                 String _key433;
+                 List<ColumnStatisticsObj> _val434;
+                 for (int _i435 = 0; _i435 < _map432.size; ++_i435)
+                 {
+                   _key433 = iprot.readString();
+                   {
+                     org.apache.thrift.protocol.TList _list436 = iprot.readListBegin();
+                     _val434 = new ArrayList<ColumnStatisticsObj>(_list436.size);
+                     ColumnStatisticsObj _elem437;
+                     for (int _i438 = 0; _i438 < _list436.size; ++_i438)
+                     {
+                       _elem437 = new ColumnStatisticsObj();
+                       _elem437.read(iprot);
+                       _val434.add(_elem437);
+                     }
+                     iprot.readListEnd();
+                   }
+                   struct.partStats.put(_key433, _val434);
+                 }
+                 iprot.readMapEnd();
+               }
+               struct.setPartStatsIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 2: // IS_STATS_COMPLIANT
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.isStatsCompliant = iprot.readBool();
++              struct.setIsStatsCompliantIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsResult struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.partStats != null) {
+         oprot.writeFieldBegin(PART_STATS_FIELD_DESC);
+         {
+           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.partStats.size()));
+           for (Map.Entry<String, List<ColumnStatisticsObj>> _iter439 : struct.partStats.entrySet())
+           {
+             oprot.writeString(_iter439.getKey());
+             {
+               oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter439.getValue().size()));
+               for (ColumnStatisticsObj _iter440 : _iter439.getValue())
+               {
+                 _iter440.write(oprot);
+               }
+               oprot.writeListEnd();
+             }
+           }
+           oprot.writeMapEnd();
+         }
+         oprot.writeFieldEnd();
+       }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
++        oprot.writeBool(struct.isStatsCompliant);
++        oprot.writeFieldEnd();
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class PartitionsStatsResultTupleSchemeFactory implements SchemeFactory {
+     public PartitionsStatsResultTupleScheme getScheme() {
+       return new PartitionsStatsResultTupleScheme();
+     }
+   }
+ 
+   private static class PartitionsStatsResultTupleScheme extends TupleScheme<PartitionsStatsResult> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResult struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       {
+         oprot.writeI32(struct.partStats.size());
+         for (Map.Entry<String, List<ColumnStatisticsObj>> _iter441 : struct.partStats.entrySet())
+         {
+           oprot.writeString(_iter441.getKey());
+           {
+             oprot.writeI32(_iter441.getValue().size());
+             for (ColumnStatisticsObj _iter442 : _iter441.getValue())
+             {
+               _iter442.write(oprot);
+             }
+           }
+         }
+       }
++      BitSet optionals = new BitSet();
++      if (struct.isSetIsStatsCompliant()) {
++        optionals.set(0);
++      }
++      oprot.writeBitSet(optionals, 1);
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeBool(struct.isStatsCompliant);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResult struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       {
+         org.apache.thrift.protocol.TMap _map443 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+         struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map443.size);
+         String _key444;
+         List<ColumnStatisticsObj> _val445;
+         for (int _i446 = 0; _i446 < _map443.size; ++_i446)
+         {
+           _key444 = iprot.readString();
+           {
+             org.apache.thrift.protocol.TList _list447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+             _val445 = new ArrayList<ColumnStatisticsObj>(_list447.size);
+             ColumnStatisticsObj _elem448;
+             for (int _i449 = 0; _i449 < _list447.size; ++_i449)
+             {
+               _elem448 = new ColumnStatisticsObj();
+               _elem448.read(iprot);
+               _val445.add(_elem448);
+             }
+           }
+           struct.partStats.put(_key444, _val445);
+         }
+       }
+       struct.setPartStatsIsSet(true);
++      BitSet incoming = iprot.readBitSet(1);
++      if (incoming.get(0)) {
++        struct.isStatsCompliant = iprot.readBool();
++        struct.setIsStatsCompliantIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
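The PartitionsStatsResult changes above add an optional boolean field, isStatsCompliant, whose "has a value" state is tracked by a bit in the one-byte __isset_bitfield rather than by nullability (primitives cannot be null). A minimal sketch of the resulting semantics, assuming the generated classes above are on the classpath; the demo class name is made up for illustration:

    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
    import org.apache.hadoop.hive.metastore.api.PartitionsStatsResult;
    import java.util.HashMap;
    import java.util.List;

    public class IsStatsCompliantDemo {
      public static void main(String[] args) {
        PartitionsStatsResult res = new PartitionsStatsResult(
            new HashMap<String, List<ColumnStatisticsObj>>());
        // The field defaults to false but is not "set": the isset bit is 0.
        System.out.println(res.isSetIsStatsCompliant()); // false
        res.setIsStatsCompliant(true); // stores the value and flips the bit
        System.out.println(res.isSetIsStatsCompliant()); // true
        // toString() and the standard-scheme writer only emit the field
        // when the bit is set, so an old reader never sees it unassigned.
        System.out.println(res);
      }
    }

Because the writer checks isSetIsStatsCompliant() rather than the value, an explicit setIsStatsCompliant(false) is still serialized; only a field that was never assigned (or was cleared via unsetIsStatsCompliant()) is omitted from the wire.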

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
index 0000000,a0ae84e..2053e3e
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java
@@@ -1,0 -1,550 +1,858 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SetPartitionsStatsRequest implements org.apache.thrift.TBase<SetPartitionsStatsRequest, SetPartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<SetPartitionsStatsRequest> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SetPartitionsStatsRequest");
+ 
+   private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);
+   private static final org.apache.thrift.protocol.TField NEED_MERGE_FIELD_DESC = new org.apache.thrift.protocol.TField("needMerge", org.apache.thrift.protocol.TType.BOOL, (short)2);
++  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)3);
++  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)4);
++  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)5);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new SetPartitionsStatsRequestStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new SetPartitionsStatsRequestTupleSchemeFactory());
+   }
+ 
+   private List<ColumnStatistics> colStats; // required
+   private boolean needMerge; // optional
++  private long txnId; // optional
++  private long writeId; // optional
++  private String validWriteIdList; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     COL_STATS((short)1, "colStats"),
 -    NEED_MERGE((short)2, "needMerge");
++    NEED_MERGE((short)2, "needMerge"),
++    TXN_ID((short)3, "txnId"),
++    WRITE_ID((short)4, "writeId"),
++    VALID_WRITE_ID_LIST((short)5, "validWriteIdList");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // COL_STATS
+           return COL_STATS;
+         case 2: // NEED_MERGE
+           return NEED_MERGE;
++        case 3: // TXN_ID
++          return TXN_ID;
++        case 4: // WRITE_ID
++          return WRITE_ID;
++        case 5: // VALID_WRITE_ID_LIST
++          return VALID_WRITE_ID_LIST;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
+   private static final int __NEEDMERGE_ISSET_ID = 0;
++  private static final int __TXNID_ISSET_ID = 1;
++  private static final int __WRITEID_ISSET_ID = 2;
+   private byte __isset_bitfield = 0;
 -  private static final _Fields optionals[] = {_Fields.NEED_MERGE};
++  private static final _Fields optionals[] = {_Fields.NEED_MERGE,_Fields.TXN_ID,_Fields.WRITE_ID,_Fields.VALID_WRITE_ID_LIST};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.COL_STATS, new org.apache.thrift.meta_data.FieldMetaData("colStats", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class))));
+     tmpMap.put(_Fields.NEED_MERGE, new org.apache.thrift.meta_data.FieldMetaData("needMerge", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
++    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetPartitionsStatsRequest.class, metaDataMap);
+   }
+ 
+   public SetPartitionsStatsRequest() {
++    this.txnId = -1L;
++
++    this.writeId = -1L;
++
+   }
+ 
+   public SetPartitionsStatsRequest(
+     List<ColumnStatistics> colStats)
+   {
+     this();
+     this.colStats = colStats;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public SetPartitionsStatsRequest(SetPartitionsStatsRequest other) {
+     __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetColStats()) {
+       List<ColumnStatistics> __this__colStats = new ArrayList<ColumnStatistics>(other.colStats.size());
+       for (ColumnStatistics other_element : other.colStats) {
+         __this__colStats.add(new ColumnStatistics(other_element));
+       }
+       this.colStats = __this__colStats;
+     }
+     this.needMerge = other.needMerge;
++    this.txnId = other.txnId;
++    this.writeId = other.writeId;
++    if (other.isSetValidWriteIdList()) {
++      this.validWriteIdList = other.validWriteIdList;
++    }
+   }
+ 
+   public SetPartitionsStatsRequest deepCopy() {
+     return new SetPartitionsStatsRequest(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.colStats = null;
+     setNeedMergeIsSet(false);
+     this.needMerge = false;
++    this.txnId = -1L;
++
++    this.writeId = -1L;
++
++    this.validWriteIdList = null;
+   }
+ 
+   public int getColStatsSize() {
+     return (this.colStats == null) ? 0 : this.colStats.size();
+   }
+ 
+   public java.util.Iterator<ColumnStatistics> getColStatsIterator() {
+     return (this.colStats == null) ? null : this.colStats.iterator();
+   }
+ 
+   public void addToColStats(ColumnStatistics elem) {
+     if (this.colStats == null) {
+       this.colStats = new ArrayList<ColumnStatistics>();
+     }
+     this.colStats.add(elem);
+   }
+ 
+   public List<ColumnStatistics> getColStats() {
+     return this.colStats;
+   }
+ 
+   public void setColStats(List<ColumnStatistics> colStats) {
+     this.colStats = colStats;
+   }
+ 
+   public void unsetColStats() {
+     this.colStats = null;
+   }
+ 
+   /** Returns true if field colStats is set (has been assigned a value) and false otherwise */
+   public boolean isSetColStats() {
+     return this.colStats != null;
+   }
+ 
+   public void setColStatsIsSet(boolean value) {
+     if (!value) {
+       this.colStats = null;
+     }
+   }
+ 
+   public boolean isNeedMerge() {
+     return this.needMerge;
+   }
+ 
+   public void setNeedMerge(boolean needMerge) {
+     this.needMerge = needMerge;
+     setNeedMergeIsSet(true);
+   }
+ 
+   public void unsetNeedMerge() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NEEDMERGE_ISSET_ID);
+   }
+ 
+   /** Returns true if field needMerge is set (has been assigned a value) and false otherwise */
+   public boolean isSetNeedMerge() {
+     return EncodingUtils.testBit(__isset_bitfield, __NEEDMERGE_ISSET_ID);
+   }
+ 
+   public void setNeedMergeIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDMERGE_ISSET_ID, value);
+   }
+ 
++  public long getTxnId() {
++    return this.txnId;
++  }
++
++  public void setTxnId(long txnId) {
++    this.txnId = txnId;
++    setTxnIdIsSet(true);
++  }
++
++  public void unsetTxnId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
++  public boolean isSetTxnId() {
++    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  public void setTxnIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
++  }
++
++  public long getWriteId() {
++    return this.writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++    setWriteIdIsSet(true);
++  }
++
++  public void unsetWriteId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
++  public boolean isSetWriteId() {
++    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  public void setWriteIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
++  }
++
++  public String getValidWriteIdList() {
++    return this.validWriteIdList;
++  }
++
++  public void setValidWriteIdList(String validWriteIdList) {
++    this.validWriteIdList = validWriteIdList;
++  }
++
++  public void unsetValidWriteIdList() {
++    this.validWriteIdList = null;
++  }
++
++  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
++  public boolean isSetValidWriteIdList() {
++    return this.validWriteIdList != null;
++  }
++
++  public void setValidWriteIdListIsSet(boolean value) {
++    if (!value) {
++      this.validWriteIdList = null;
++    }
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case COL_STATS:
+       if (value == null) {
+         unsetColStats();
+       } else {
+         setColStats((List<ColumnStatistics>)value);
+       }
+       break;
+ 
+     case NEED_MERGE:
+       if (value == null) {
+         unsetNeedMerge();
+       } else {
+         setNeedMerge((Boolean)value);
+       }
+       break;
+ 
++    case TXN_ID:
++      if (value == null) {
++        unsetTxnId();
++      } else {
++        setTxnId((Long)value);
++      }
++      break;
++
++    case WRITE_ID:
++      if (value == null) {
++        unsetWriteId();
++      } else {
++        setWriteId((Long)value);
++      }
++      break;
++
++    case VALID_WRITE_ID_LIST:
++      if (value == null) {
++        unsetValidWriteIdList();
++      } else {
++        setValidWriteIdList((String)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case COL_STATS:
+       return getColStats();
+ 
+     case NEED_MERGE:
+       return isNeedMerge();
+ 
++    case TXN_ID:
++      return getTxnId();
++
++    case WRITE_ID:
++      return getWriteId();
++
++    case VALID_WRITE_ID_LIST:
++      return getValidWriteIdList();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case COL_STATS:
+       return isSetColStats();
+     case NEED_MERGE:
+       return isSetNeedMerge();
++    case TXN_ID:
++      return isSetTxnId();
++    case WRITE_ID:
++      return isSetWriteId();
++    case VALID_WRITE_ID_LIST:
++      return isSetValidWriteIdList();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof SetPartitionsStatsRequest)
+       return this.equals((SetPartitionsStatsRequest)that);
+     return false;
+   }
+ 
+   public boolean equals(SetPartitionsStatsRequest that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_colStats = true && this.isSetColStats();
+     boolean that_present_colStats = true && that.isSetColStats();
+     if (this_present_colStats || that_present_colStats) {
+       if (!(this_present_colStats && that_present_colStats))
+         return false;
+       if (!this.colStats.equals(that.colStats))
+         return false;
+     }
+ 
+     boolean this_present_needMerge = true && this.isSetNeedMerge();
+     boolean that_present_needMerge = true && that.isSetNeedMerge();
+     if (this_present_needMerge || that_present_needMerge) {
+       if (!(this_present_needMerge && that_present_needMerge))
+         return false;
+       if (this.needMerge != that.needMerge)
+         return false;
+     }
+ 
++    boolean this_present_txnId = true && this.isSetTxnId();
++    boolean that_present_txnId = true && that.isSetTxnId();
++    if (this_present_txnId || that_present_txnId) {
++      if (!(this_present_txnId && that_present_txnId))
++        return false;
++      if (this.txnId != that.txnId)
++        return false;
++    }
++
++    boolean this_present_writeId = true && this.isSetWriteId();
++    boolean that_present_writeId = true && that.isSetWriteId();
++    if (this_present_writeId || that_present_writeId) {
++      if (!(this_present_writeId && that_present_writeId))
++        return false;
++      if (this.writeId != that.writeId)
++        return false;
++    }
++
++    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
++    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
++    if (this_present_validWriteIdList || that_present_validWriteIdList) {
++      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
++        return false;
++      if (!this.validWriteIdList.equals(that.validWriteIdList))
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_colStats = true && (isSetColStats());
+     list.add(present_colStats);
+     if (present_colStats)
+       list.add(colStats);
+ 
+     boolean present_needMerge = true && (isSetNeedMerge());
+     list.add(present_needMerge);
+     if (present_needMerge)
+       list.add(needMerge);
+ 
++    boolean present_txnId = true && (isSetTxnId());
++    list.add(present_txnId);
++    if (present_txnId)
++      list.add(txnId);
++
++    boolean present_writeId = true && (isSetWriteId());
++    list.add(present_writeId);
++    if (present_writeId)
++      list.add(writeId);
++
++    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
++    list.add(present_validWriteIdList);
++    if (present_validWriteIdList)
++      list.add(validWriteIdList);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(SetPartitionsStatsRequest other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetColStats()).compareTo(other.isSetColStats());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetColStats()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colStats, other.colStats);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetNeedMerge()).compareTo(other.isSetNeedMerge());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetNeedMerge()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.needMerge, other.needMerge);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTxnId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetWriteId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetValidWriteIdList()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("SetPartitionsStatsRequest(");
+     boolean first = true;
+ 
+     sb.append("colStats:");
+     if (this.colStats == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.colStats);
+     }
+     first = false;
+     if (isSetNeedMerge()) {
+       if (!first) sb.append(", ");
+       sb.append("needMerge:");
+       sb.append(this.needMerge);
+       first = false;
+     }
++    if (isSetTxnId()) {
++      if (!first) sb.append(", ");
++      sb.append("txnId:");
++      sb.append(this.txnId);
++      first = false;
++    }
++    if (isSetWriteId()) {
++      if (!first) sb.append(", ");
++      sb.append("writeId:");
++      sb.append(this.writeId);
++      first = false;
++    }
++    if (isSetValidWriteIdList()) {
++      if (!first) sb.append(", ");
++      sb.append("validWriteIdList:");
++      if (this.validWriteIdList == null) {
++        sb.append("null");
++      } else {
++        sb.append(this.validWriteIdList);
++      }
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetColStats()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'colStats' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
+       // Java serialization bypasses the default constructor, so reset the isset bitfield explicitly.
+       __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class SetPartitionsStatsRequestStandardSchemeFactory implements SchemeFactory {
+     public SetPartitionsStatsRequestStandardScheme getScheme() {
+       return new SetPartitionsStatsRequestStandardScheme();
+     }
+   }
+ 
+   private static class SetPartitionsStatsRequestStandardScheme extends StandardScheme<SetPartitionsStatsRequest> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // COL_STATS
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list284 = iprot.readListBegin();
+                 struct.colStats = new ArrayList<ColumnStatistics>(_list284.size);
+                 ColumnStatistics _elem285;
+                 for (int _i286 = 0; _i286 < _list284.size; ++_i286)
+                 {
+                   _elem285 = new ColumnStatistics();
+                   _elem285.read(iprot);
+                   struct.colStats.add(_elem285);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setColStatsIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // NEED_MERGE
+             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+               struct.needMerge = iprot.readBool();
+               struct.setNeedMergeIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 3: // TXN_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.txnId = iprot.readI64();
++              struct.setTxnIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 4: // WRITE_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.writeId = iprot.readI64();
++              struct.setWriteIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 5: // VALID_WRITE_ID_LIST
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.validWriteIdList = iprot.readString();
++              struct.setValidWriteIdListIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.colStats != null) {
+         oprot.writeFieldBegin(COL_STATS_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size()));
+           for (ColumnStatistics _iter287 : struct.colStats)
+           {
+             _iter287.write(oprot);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       if (struct.isSetNeedMerge()) {
+         oprot.writeFieldBegin(NEED_MERGE_FIELD_DESC);
+         oprot.writeBool(struct.needMerge);
+         oprot.writeFieldEnd();
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
++        oprot.writeI64(struct.txnId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.isSetWriteId()) {
++        oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
++        oprot.writeI64(struct.writeId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.validWriteIdList != null) {
++        if (struct.isSetValidWriteIdList()) {
++          oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
++          oprot.writeString(struct.validWriteIdList);
++          oprot.writeFieldEnd();
++        }
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class SetPartitionsStatsRequestTupleSchemeFactory implements SchemeFactory {
+     public SetPartitionsStatsRequestTupleScheme getScheme() {
+       return new SetPartitionsStatsRequestTupleScheme();
+     }
+   }
+ 
+   private static class SetPartitionsStatsRequestTupleScheme extends TupleScheme<SetPartitionsStatsRequest> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       {
+         oprot.writeI32(struct.colStats.size());
+         for (ColumnStatistics _iter288 : struct.colStats)
+         {
+           _iter288.write(oprot);
+         }
+       }
+       BitSet optionals = new BitSet();
+       if (struct.isSetNeedMerge()) {
+         optionals.set(0);
+       }
 -      oprot.writeBitSet(optionals, 1);
++      if (struct.isSetTxnId()) {
++        optionals.set(1);
++      }
++      if (struct.isSetWriteId()) {
++        optionals.set(2);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        optionals.set(3);
++      }
++      oprot.writeBitSet(optionals, 4);
+       if (struct.isSetNeedMerge()) {
+         oprot.writeBool(struct.needMerge);
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeI64(struct.txnId);
++      }
++      if (struct.isSetWriteId()) {
++        oprot.writeI64(struct.writeId);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        oprot.writeString(struct.validWriteIdList);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       {
+         org.apache.thrift.protocol.TList _list289 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.colStats = new ArrayList<ColumnStatistics>(_list289.size);
+         ColumnStatistics _elem290;
+         for (int _i291 = 0; _i291 < _list289.size; ++_i291)
+         {
+           _elem290 = new ColumnStatistics();
+           _elem290.read(iprot);
+           struct.colStats.add(_elem290);
+         }
+       }
+       struct.setColStatsIsSet(true);
 -      BitSet incoming = iprot.readBitSet(1);
++      BitSet incoming = iprot.readBitSet(4);
+       if (incoming.get(0)) {
+         struct.needMerge = iprot.readBool();
+         struct.setNeedMergeIsSet(true);
+       }
++      if (incoming.get(1)) {
++        struct.txnId = iprot.readI64();
++        struct.setTxnIdIsSet(true);
++      }
++      if (incoming.get(2)) {
++        struct.writeId = iprot.readI64();
++        struct.setWriteIdIsSet(true);
++      }
++      if (incoming.get(3)) {
++        struct.validWriteIdList = iprot.readString();
++        struct.setValidWriteIdListIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
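The SetPartitionsStatsRequest changes above grow the struct from two fields to five, packing the txnId and writeId isset flags into the same __isset_bitfield byte as needMerge (bits 0 through 2), while the string field validWriteIdList signals "unset" by staying null. Note also the tuple scheme: writeBitSet(optionals, 1)/readBitSet(1) become width 4, which is why TupleScheme encodings are only safe when reader and writer were generated from the same IDL version, whereas the standard scheme skips unknown field ids (the default case in read) and stays forward compatible. A minimal round-trip sketch, assuming the generated classes above plus libthrift 0.9.3; the demo class name, buffer size, and the validWriteIdList value are illustrative only:

    import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
    import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;
    import java.util.ArrayList;

    public class TxnStatsRequestDemo {
      public static void main(String[] args) throws Exception {
        SetPartitionsStatsRequest req = new SetPartitionsStatsRequest(
            new ArrayList<ColumnStatistics>());
        // txnId/writeId are initialized to -1 by the default constructor,
        // but their isset bits stay clear until the setters run.
        System.out.println(req.getTxnId() + " " + req.isSetTxnId()); // -1 false
        req.setTxnId(42L);
        req.setWriteId(7L);
        req.setValidWriteIdList("db1.tbl1:10:10::"); // illustrative value only
        // Round-trip through TCompactProtocol, the same protocol the
        // private writeObject/readObject helpers above use.
        TMemoryBuffer buf = new TMemoryBuffer(1024);
        req.write(new TCompactProtocol(buf));
        SetPartitionsStatsRequest copy = new SetPartitionsStatsRequest();
        copy.read(new TCompactProtocol(buf));
        System.out.println(copy.isSetTxnId() + " " + copy.getTxnId()); // true 42
      }
    }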


[08/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
new file mode 100644
index 0000000..91cf567
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
@@ -0,0 +1,900 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionsStatsRequest implements org.apache.thrift.TBase<PartitionsStatsRequest, PartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsStatsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3);
+  private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionsStatsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionsStatsRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tblName; // required
+  private List<String> colNames; // required
+  private List<String> partNames; // required
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TBL_NAME((short)2, "tblName"),
+    COL_NAMES((short)3, "colNames"),
+    PART_NAMES((short)4, "partNames"),
+    CAT_NAME((short)5, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // COL_NAMES
+          return COL_NAMES;
+        case 4: // PART_NAMES
+          return PART_NAMES;
+        case 5: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COL_NAMES, new org.apache.thrift.meta_data.FieldMetaData("colNames", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap);
+  }
+
+  public PartitionsStatsRequest() {
+  }
+
+  public PartitionsStatsRequest(
+    String dbName,
+    String tblName,
+    List<String> colNames,
+    List<String> partNames)
+  {
+    this();
+    this.dbName = dbName;
+    this.tblName = tblName;
+    this.colNames = colNames;
+    this.partNames = partNames;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionsStatsRequest(PartitionsStatsRequest other) {
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTblName()) {
+      this.tblName = other.tblName;
+    }
+    if (other.isSetColNames()) {
+      List<String> __this__colNames = new ArrayList<String>(other.colNames);
+      this.colNames = __this__colNames;
+    }
+    if (other.isSetPartNames()) {
+      List<String> __this__partNames = new ArrayList<String>(other.partNames);
+      this.partNames = __this__partNames;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public PartitionsStatsRequest deepCopy() {
+    return new PartitionsStatsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tblName = null;
+    this.colNames = null;
+    this.partNames = null;
+    this.catName = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTblName() {
+    return this.tblName;
+  }
+
+  public void setTblName(String tblName) {
+    this.tblName = tblName;
+  }
+
+  public void unsetTblName() {
+    this.tblName = null;
+  }
+
+  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblName() {
+    return this.tblName != null;
+  }
+
+  public void setTblNameIsSet(boolean value) {
+    if (!value) {
+      this.tblName = null;
+    }
+  }
+
+  public int getColNamesSize() {
+    return (this.colNames == null) ? 0 : this.colNames.size();
+  }
+
+  public java.util.Iterator<String> getColNamesIterator() {
+    return (this.colNames == null) ? null : this.colNames.iterator();
+  }
+
+  public void addToColNames(String elem) {
+    if (this.colNames == null) {
+      this.colNames = new ArrayList<String>();
+    }
+    this.colNames.add(elem);
+  }
+
+  public List<String> getColNames() {
+    return this.colNames;
+  }
+
+  public void setColNames(List<String> colNames) {
+    this.colNames = colNames;
+  }
+
+  public void unsetColNames() {
+    this.colNames = null;
+  }
+
+  /** Returns true if field colNames is set (has been assigned a value) and false otherwise */
+  public boolean isSetColNames() {
+    return this.colNames != null;
+  }
+
+  public void setColNamesIsSet(boolean value) {
+    if (!value) {
+      this.colNames = null;
+    }
+  }
+
+  public int getPartNamesSize() {
+    return (this.partNames == null) ? 0 : this.partNames.size();
+  }
+
+  public java.util.Iterator<String> getPartNamesIterator() {
+    return (this.partNames == null) ? null : this.partNames.iterator();
+  }
+
+  public void addToPartNames(String elem) {
+    if (this.partNames == null) {
+      this.partNames = new ArrayList<String>();
+    }
+    this.partNames.add(elem);
+  }
+
+  public List<String> getPartNames() {
+    return this.partNames;
+  }
+
+  public void setPartNames(List<String> partNames) {
+    this.partNames = partNames;
+  }
+
+  public void unsetPartNames() {
+    this.partNames = null;
+  }
+
+  /** Returns true if field partNames is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartNames() {
+    return this.partNames != null;
+  }
+
+  public void setPartNamesIsSet(boolean value) {
+    if (!value) {
+      this.partNames = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTblName();
+      } else {
+        setTblName((String)value);
+      }
+      break;
+
+    case COL_NAMES:
+      if (value == null) {
+        unsetColNames();
+      } else {
+        setColNames((List<String>)value);
+      }
+      break;
+
+    case PART_NAMES:
+      if (value == null) {
+        unsetPartNames();
+      } else {
+        setPartNames((List<String>)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TBL_NAME:
+      return getTblName();
+
+    case COL_NAMES:
+      return getColNames();
+
+    case PART_NAMES:
+      return getPartNames();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TBL_NAME:
+      return isSetTblName();
+    case COL_NAMES:
+      return isSetColNames();
+    case PART_NAMES:
+      return isSetPartNames();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionsStatsRequest)
+      return this.equals((PartitionsStatsRequest)that);
+    return false;
+  }
+
+  public boolean equals(PartitionsStatsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tblName = true && this.isSetTblName();
+    boolean that_present_tblName = true && that.isSetTblName();
+    if (this_present_tblName || that_present_tblName) {
+      if (!(this_present_tblName && that_present_tblName))
+        return false;
+      if (!this.tblName.equals(that.tblName))
+        return false;
+    }
+
+    boolean this_present_colNames = true && this.isSetColNames();
+    boolean that_present_colNames = true && that.isSetColNames();
+    if (this_present_colNames || that_present_colNames) {
+      if (!(this_present_colNames && that_present_colNames))
+        return false;
+      if (!this.colNames.equals(that.colNames))
+        return false;
+    }
+
+    boolean this_present_partNames = true && this.isSetPartNames();
+    boolean that_present_partNames = true && that.isSetPartNames();
+    if (this_present_partNames || that_present_partNames) {
+      if (!(this_present_partNames && that_present_partNames))
+        return false;
+      if (!this.partNames.equals(that.partNames))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tblName = true && (isSetTblName());
+    list.add(present_tblName);
+    if (present_tblName)
+      list.add(tblName);
+
+    boolean present_colNames = true && (isSetColNames());
+    list.add(present_colNames);
+    if (present_colNames)
+      list.add(colNames);
+
+    boolean present_partNames = true && (isSetPartNames());
+    list.add(present_partNames);
+    if (present_partNames)
+      list.add(partNames);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionsStatsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColNames()).compareTo(other.isSetColNames());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColNames()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colNames, other.colNames);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartNames()).compareTo(other.isSetPartNames());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartNames()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partNames, other.partNames);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionsStatsRequest(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tblName:");
+    if (this.tblName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tblName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("colNames:");
+    if (this.colNames == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.colNames);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("partNames:");
+    if (this.partNames == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partNames);
+    }
+    first = false;
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTblName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetColNames()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'colNames' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPartNames()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partNames' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionsStatsRequestStandardSchemeFactory implements SchemeFactory {
+    public PartitionsStatsRequestStandardScheme getScheme() {
+      return new PartitionsStatsRequestStandardScheme();
+    }
+  }
+
+  private static class PartitionsStatsRequestStandardScheme extends StandardScheme<PartitionsStatsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tblName = iprot.readString();
+              struct.setTblNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // COL_NAMES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list458 = iprot.readListBegin();
+                struct.colNames = new ArrayList<String>(_list458.size);
+                String _elem459;
+                for (int _i460 = 0; _i460 < _list458.size; ++_i460)
+                {
+                  _elem459 = iprot.readString();
+                  struct.colNames.add(_elem459);
+                }
+                iprot.readListEnd();
+              }
+              struct.setColNamesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // PART_NAMES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list461 = iprot.readListBegin();
+                struct.partNames = new ArrayList<String>(_list461.size);
+                String _elem462;
+                for (int _i463 = 0; _i463 < _list461.size; ++_i463)
+                {
+                  _elem462 = iprot.readString();
+                  struct.partNames.add(_elem462);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartNamesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblName != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tblName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.colNames != null) {
+        oprot.writeFieldBegin(COL_NAMES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size()));
+          for (String _iter464 : struct.colNames)
+          {
+            oprot.writeString(_iter464);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.partNames != null) {
+        oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
+          for (String _iter465 : struct.partNames)
+          {
+            oprot.writeString(_iter465);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionsStatsRequestTupleSchemeFactory implements SchemeFactory {
+    public PartitionsStatsRequestTupleScheme getScheme() {
+      return new PartitionsStatsRequestTupleScheme();
+    }
+  }
+
+  private static class PartitionsStatsRequestTupleScheme extends TupleScheme<PartitionsStatsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tblName);
+      {
+        oprot.writeI32(struct.colNames.size());
+        for (String _iter466 : struct.colNames)
+        {
+          oprot.writeString(_iter466);
+        }
+      }
+      {
+        oprot.writeI32(struct.partNames.size());
+        for (String _iter467 : struct.partNames)
+        {
+          oprot.writeString(_iter467);
+        }
+      }
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatName()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tblName = iprot.readString();
+      struct.setTblNameIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list468 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.colNames = new ArrayList<String>(_list468.size);
+        String _elem469;
+        for (int _i470 = 0; _i470 < _list468.size; ++_i470)
+        {
+          _elem469 = iprot.readString();
+          struct.colNames.add(_elem469);
+        }
+      }
+      struct.setColNamesIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list471 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.partNames = new ArrayList<String>(_list471.size);
+        String _elem472;
+        for (int _i473 = 0; _i473 < _list471.size; ++_i473)
+        {
+          _elem472 = iprot.readString();
+          struct.partNames.add(_elem472);
+        }
+      }
+      struct.setPartNamesIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
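
For reference, a minimal usage sketch of the generated PartitionsStatsRequest above (illustrative only, not part of the commit; the class name and the database/table/column/partition values below are made up, and it assumes the generated classes and libthrift 0.9.3 are on the classpath):

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;

public class PartitionsStatsRequestExample {
  public static void main(String[] args) throws Exception {
    PartitionsStatsRequest req = new PartitionsStatsRequest();
    req.setDbName("default");                          // required (field 1)
    req.setTblName("sales");                           // required (field 2)
    req.setColNames(Arrays.asList("id", "amount"));    // required (field 3)
    req.setPartNames(Arrays.asList("ds=2018-07-13"));  // required (field 4)
    req.setCatName("hive");                            // optional (field 5)
    req.validate();  // throws TProtocolException if a required field is unset
    System.out.println(req);  // PartitionsStatsRequest(dbName:default, ...)
  }
}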

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
new file mode 100644
index 0000000..4caec8f
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java
@@ -0,0 +1,490 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionsStatsResult implements org.apache.thrift.TBase<PartitionsStatsResult, PartitionsStatsResult._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsStatsResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsResult");
+
+  private static final org.apache.thrift.protocol.TField PART_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("partStats", org.apache.thrift.protocol.TType.MAP, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionsStatsResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionsStatsResultTupleSchemeFactory());
+  }
+
+  private Map<String,List<ColumnStatisticsObj>> partStats; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PART_STATS((short)1, "partStats");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PART_STATS
+          return PART_STATS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PART_STATS, new org.apache.thrift.meta_data.FieldMetaData("partStats", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+                new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class)))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsResult.class, metaDataMap);
+  }
+
+  public PartitionsStatsResult() {
+  }
+
+  public PartitionsStatsResult(
+    Map<String,List<ColumnStatisticsObj>> partStats)
+  {
+    this();
+    this.partStats = partStats;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionsStatsResult(PartitionsStatsResult other) {
+    if (other.isSetPartStats()) {
+      Map<String,List<ColumnStatisticsObj>> __this__partStats = new HashMap<String,List<ColumnStatisticsObj>>(other.partStats.size());
+      for (Map.Entry<String, List<ColumnStatisticsObj>> other_element : other.partStats.entrySet()) {
+
+        String other_element_key = other_element.getKey();
+        List<ColumnStatisticsObj> other_element_value = other_element.getValue();
+
+        String __this__partStats_copy_key = other_element_key;
+
+        List<ColumnStatisticsObj> __this__partStats_copy_value = new ArrayList<ColumnStatisticsObj>(other_element_value.size());
+        for (ColumnStatisticsObj other_element_value_element : other_element_value) {
+          __this__partStats_copy_value.add(new ColumnStatisticsObj(other_element_value_element));
+        }
+
+        __this__partStats.put(__this__partStats_copy_key, __this__partStats_copy_value);
+      }
+      this.partStats = __this__partStats;
+    }
+  }
+
+  public PartitionsStatsResult deepCopy() {
+    return new PartitionsStatsResult(this);
+  }
+
+  @Override
+  public void clear() {
+    this.partStats = null;
+  }
+
+  public int getPartStatsSize() {
+    return (this.partStats == null) ? 0 : this.partStats.size();
+  }
+
+  public void putToPartStats(String key, List<ColumnStatisticsObj> val) {
+    if (this.partStats == null) {
+      this.partStats = new HashMap<String,List<ColumnStatisticsObj>>();
+    }
+    this.partStats.put(key, val);
+  }
+
+  public Map<String,List<ColumnStatisticsObj>> getPartStats() {
+    return this.partStats;
+  }
+
+  public void setPartStats(Map<String,List<ColumnStatisticsObj>> partStats) {
+    this.partStats = partStats;
+  }
+
+  public void unsetPartStats() {
+    this.partStats = null;
+  }
+
+  /** Returns true if field partStats is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartStats() {
+    return this.partStats != null;
+  }
+
+  public void setPartStatsIsSet(boolean value) {
+    if (!value) {
+      this.partStats = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PART_STATS:
+      if (value == null) {
+        unsetPartStats();
+      } else {
+        setPartStats((Map<String,List<ColumnStatisticsObj>>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PART_STATS:
+      return getPartStats();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PART_STATS:
+      return isSetPartStats();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionsStatsResult)
+      return this.equals((PartitionsStatsResult)that);
+    return false;
+  }
+
+  public boolean equals(PartitionsStatsResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_partStats = true && this.isSetPartStats();
+    boolean that_present_partStats = true && that.isSetPartStats();
+    if (this_present_partStats || that_present_partStats) {
+      if (!(this_present_partStats && that_present_partStats))
+        return false;
+      if (!this.partStats.equals(that.partStats))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_partStats = true && (isSetPartStats());
+    list.add(present_partStats);
+    if (present_partStats)
+      list.add(partStats);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionsStatsResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPartStats()).compareTo(other.isSetPartStats());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartStats()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partStats, other.partStats);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionsStatsResult(");
+    boolean first = true;
+
+    sb.append("partStats:");
+    if (this.partStats == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partStats);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetPartStats()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partStats' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionsStatsResultStandardSchemeFactory implements SchemeFactory {
+    public PartitionsStatsResultStandardScheme getScheme() {
+      return new PartitionsStatsResultStandardScheme();
+    }
+  }
+
+  private static class PartitionsStatsResultStandardScheme extends StandardScheme<PartitionsStatsResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PART_STATS
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map432 = iprot.readMapBegin();
+                struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map432.size);
+                String _key433;
+                List<ColumnStatisticsObj> _val434;
+                for (int _i435 = 0; _i435 < _map432.size; ++_i435)
+                {
+                  _key433 = iprot.readString();
+                  {
+                    org.apache.thrift.protocol.TList _list436 = iprot.readListBegin();
+                    _val434 = new ArrayList<ColumnStatisticsObj>(_list436.size);
+                    ColumnStatisticsObj _elem437;
+                    for (int _i438 = 0; _i438 < _list436.size; ++_i438)
+                    {
+                      _elem437 = new ColumnStatisticsObj();
+                      _elem437.read(iprot);
+                      _val434.add(_elem437);
+                    }
+                    iprot.readListEnd();
+                  }
+                  struct.partStats.put(_key433, _val434);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setPartStatsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.partStats != null) {
+        oprot.writeFieldBegin(PART_STATS_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.partStats.size()));
+          for (Map.Entry<String, List<ColumnStatisticsObj>> _iter439 : struct.partStats.entrySet())
+          {
+            oprot.writeString(_iter439.getKey());
+            {
+              oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter439.getValue().size()));
+              for (ColumnStatisticsObj _iter440 : _iter439.getValue())
+              {
+                _iter440.write(oprot);
+              }
+              oprot.writeListEnd();
+            }
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionsStatsResultTupleSchemeFactory implements SchemeFactory {
+    public PartitionsStatsResultTupleScheme getScheme() {
+      return new PartitionsStatsResultTupleScheme();
+    }
+  }
+
+  private static class PartitionsStatsResultTupleScheme extends TupleScheme<PartitionsStatsResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.partStats.size());
+        for (Map.Entry<String, List<ColumnStatisticsObj>> _iter441 : struct.partStats.entrySet())
+        {
+          oprot.writeString(_iter441.getKey());
+          {
+            oprot.writeI32(_iter441.getValue().size());
+            for (ColumnStatisticsObj _iter442 : _iter441.getValue())
+            {
+              _iter442.write(oprot);
+            }
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TMap _map443 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32());
+        struct.partStats = new HashMap<String,List<ColumnStatisticsObj>>(2*_map443.size);
+        String _key444;
+        List<ColumnStatisticsObj> _val445;
+        for (int _i446 = 0; _i446 < _map443.size; ++_i446)
+        {
+          _key444 = iprot.readString();
+          {
+            org.apache.thrift.protocol.TList _list447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+            _val445 = new ArrayList<ColumnStatisticsObj>(_list447.size);
+            ColumnStatisticsObj _elem448;
+            for (int _i449 = 0; _i449 < _list447.size; ++_i449)
+            {
+              _elem448 = new ColumnStatisticsObj();
+              _elem448.read(iprot);
+              _val445.add(_elem448);
+            }
+          }
+          struct.partStats.put(_key444, _val445);
+        }
+      }
+      struct.setPartStatsIsSet(true);
+    }
+  }
+
+}
+
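
Likewise, a minimal sketch of consuming the PartitionsStatsResult generated above (illustrative only, not part of the commit; the class name and partition name are made up): its single required field, partStats, maps each partition name to the list of ColumnStatisticsObj gathered for that partition.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.PartitionsStatsResult;

public class PartitionsStatsResultExample {
  public static void main(String[] args) {
    PartitionsStatsResult result = new PartitionsStatsResult();
    // An empty stats list stands in for real per-column statistics here.
    result.putToPartStats("ds=2018-07-13", new ArrayList<ColumnStatisticsObj>());
    for (Map.Entry<String, List<ColumnStatisticsObj>> e : result.getPartStats().entrySet()) {
      System.out.println(e.getKey() + " -> " + e.getValue().size() + " column stat objects");
    }
  }
}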

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java
new file mode 100644
index 0000000..591348d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysRequest.java
@@ -0,0 +1,600 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PrimaryKeysRequest implements org.apache.thrift.TBase<PrimaryKeysRequest, PrimaryKeysRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PrimaryKeysRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrimaryKeysRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PrimaryKeysRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PrimaryKeysRequestTupleSchemeFactory());
+  }
+
+  private String db_name; // required
+  private String tbl_name; // required
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "db_name"),
+    TBL_NAME((short)2, "tbl_name"),
+    CAT_NAME((short)3, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PrimaryKeysRequest.class, metaDataMap);
+  }
+
+  public PrimaryKeysRequest() {
+  }
+
+  public PrimaryKeysRequest(
+    String db_name,
+    String tbl_name)
+  {
+    this();
+    this.db_name = db_name;
+    this.tbl_name = tbl_name;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PrimaryKeysRequest(PrimaryKeysRequest other) {
+    if (other.isSetDb_name()) {
+      this.db_name = other.db_name;
+    }
+    if (other.isSetTbl_name()) {
+      this.tbl_name = other.tbl_name;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public PrimaryKeysRequest deepCopy() {
+    return new PrimaryKeysRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.db_name = null;
+    this.tbl_name = null;
+    this.catName = null;
+  }
+
+  public String getDb_name() {
+    return this.db_name;
+  }
+
+  public void setDb_name(String db_name) {
+    this.db_name = db_name;
+  }
+
+  public void unsetDb_name() {
+    this.db_name = null;
+  }
+
+  /** Returns true if field db_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetDb_name() {
+    return this.db_name != null;
+  }
+
+  public void setDb_nameIsSet(boolean value) {
+    if (!value) {
+      this.db_name = null;
+    }
+  }
+
+  public String getTbl_name() {
+    return this.tbl_name;
+  }
+
+  public void setTbl_name(String tbl_name) {
+    this.tbl_name = tbl_name;
+  }
+
+  public void unsetTbl_name() {
+    this.tbl_name = null;
+  }
+
+  /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetTbl_name() {
+    return this.tbl_name != null;
+  }
+
+  public void setTbl_nameIsSet(boolean value) {
+    if (!value) {
+      this.tbl_name = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDb_name();
+      } else {
+        setDb_name((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTbl_name();
+      } else {
+        setTbl_name((String)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDb_name();
+
+    case TBL_NAME:
+      return getTbl_name();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDb_name();
+    case TBL_NAME:
+      return isSetTbl_name();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PrimaryKeysRequest)
+      return this.equals((PrimaryKeysRequest)that);
+    return false;
+  }
+
+  public boolean equals(PrimaryKeysRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_db_name = true && this.isSetDb_name();
+    boolean that_present_db_name = true && that.isSetDb_name();
+    if (this_present_db_name || that_present_db_name) {
+      if (!(this_present_db_name && that_present_db_name))
+        return false;
+      if (!this.db_name.equals(that.db_name))
+        return false;
+    }
+
+    boolean this_present_tbl_name = true && this.isSetTbl_name();
+    boolean that_present_tbl_name = true && that.isSetTbl_name();
+    if (this_present_tbl_name || that_present_tbl_name) {
+      if (!(this_present_tbl_name && that_present_tbl_name))
+        return false;
+      if (!this.tbl_name.equals(that.tbl_name))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_db_name = true && (isSetDb_name());
+    list.add(present_db_name);
+    if (present_db_name)
+      list.add(db_name);
+
+    boolean present_tbl_name = true && (isSetTbl_name());
+    list.add(present_tbl_name);
+    if (present_tbl_name)
+      list.add(tbl_name);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PrimaryKeysRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDb_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTbl_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PrimaryKeysRequest(");
+    boolean first = true;
+
+    sb.append("db_name:");
+    if (this.db_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.db_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tbl_name:");
+    if (this.tbl_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tbl_name);
+    }
+    first = false;
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDb_name()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTbl_name()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tbl_name' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PrimaryKeysRequestStandardSchemeFactory implements SchemeFactory {
+    public PrimaryKeysRequestStandardScheme getScheme() {
+      return new PrimaryKeysRequestStandardScheme();
+    }
+  }
+
+  private static class PrimaryKeysRequestStandardScheme extends StandardScheme<PrimaryKeysRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PrimaryKeysRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.db_name = iprot.readString();
+              struct.setDb_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tbl_name = iprot.readString();
+              struct.setTbl_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PrimaryKeysRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.db_name != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.db_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tbl_name != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tbl_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PrimaryKeysRequestTupleSchemeFactory implements SchemeFactory {
+    public PrimaryKeysRequestTupleScheme getScheme() {
+      return new PrimaryKeysRequestTupleScheme();
+    }
+  }
+
+  private static class PrimaryKeysRequestTupleScheme extends TupleScheme<PrimaryKeysRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.db_name);
+      oprot.writeString(struct.tbl_name);
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatName()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.db_name = iprot.readString();
+      struct.setDb_nameIsSet(true);
+      struct.tbl_name = iprot.readString();
+      struct.setTbl_nameIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
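
As a final illustration (not part of the commit; the class name and values are made up), the private writeObject/readObject hooks above delegate Java serialization to TCompactProtocol, so a generated struct such as PrimaryKeysRequest should survive an ObjectOutputStream round trip intact:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;

public class PrimaryKeysRequestExample {
  public static void main(String[] args) throws Exception {
    PrimaryKeysRequest req = new PrimaryKeysRequest("default", "orders");
    req.setCatName("hive");  // optional catalog field (field 3 above)

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
      oos.writeObject(req);  // serialized via the compact Thrift protocol
    }
    try (ObjectInputStream ois =
             new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
      PrimaryKeysRequest copy = (PrimaryKeysRequest) ois.readObject();
      System.out.println(req.equals(copy));  // true
    }
  }
}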

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java
new file mode 100644
index 0000000..0b776fb
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrimaryKeysResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PrimaryKeysResponse implements org.apache.thrift.TBase<PrimaryKeysResponse, PrimaryKeysResponse._Fields>, java.io.Serializable, Cloneable, Comparable<PrimaryKeysResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PrimaryKeysResponse");
+
+  private static final org.apache.thrift.protocol.TField PRIMARY_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("primaryKeys", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PrimaryKeysResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PrimaryKeysResponseTupleSchemeFactory());
+  }
+
+  private List<SQLPrimaryKey> primaryKeys; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PRIMARY_KEYS((short)1, "primaryKeys");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PRIMARY_KEYS
+          return PRIMARY_KEYS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PRIMARY_KEYS, new org.apache.thrift.meta_data.FieldMetaData("primaryKeys", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLPrimaryKey.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PrimaryKeysResponse.class, metaDataMap);
+  }
+
+  public PrimaryKeysResponse() {
+  }
+
+  public PrimaryKeysResponse(
+    List<SQLPrimaryKey> primaryKeys)
+  {
+    this();
+    this.primaryKeys = primaryKeys;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PrimaryKeysResponse(PrimaryKeysResponse other) {
+    if (other.isSetPrimaryKeys()) {
+      List<SQLPrimaryKey> __this__primaryKeys = new ArrayList<SQLPrimaryKey>(other.primaryKeys.size());
+      for (SQLPrimaryKey other_element : other.primaryKeys) {
+        __this__primaryKeys.add(new SQLPrimaryKey(other_element));
+      }
+      this.primaryKeys = __this__primaryKeys;
+    }
+  }
+
+  public PrimaryKeysResponse deepCopy() {
+    return new PrimaryKeysResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.primaryKeys = null;
+  }
+
+  public int getPrimaryKeysSize() {
+    return (this.primaryKeys == null) ? 0 : this.primaryKeys.size();
+  }
+
+  public java.util.Iterator<SQLPrimaryKey> getPrimaryKeysIterator() {
+    return (this.primaryKeys == null) ? null : this.primaryKeys.iterator();
+  }
+
+  public void addToPrimaryKeys(SQLPrimaryKey elem) {
+    if (this.primaryKeys == null) {
+      this.primaryKeys = new ArrayList<SQLPrimaryKey>();
+    }
+    this.primaryKeys.add(elem);
+  }
+
+  public List<SQLPrimaryKey> getPrimaryKeys() {
+    return this.primaryKeys;
+  }
+
+  public void setPrimaryKeys(List<SQLPrimaryKey> primaryKeys) {
+    this.primaryKeys = primaryKeys;
+  }
+
+  public void unsetPrimaryKeys() {
+    this.primaryKeys = null;
+  }
+
+  /** Returns true if field primaryKeys is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrimaryKeys() {
+    return this.primaryKeys != null;
+  }
+
+  public void setPrimaryKeysIsSet(boolean value) {
+    if (!value) {
+      this.primaryKeys = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PRIMARY_KEYS:
+      if (value == null) {
+        unsetPrimaryKeys();
+      } else {
+        setPrimaryKeys((List<SQLPrimaryKey>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PRIMARY_KEYS:
+      return getPrimaryKeys();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PRIMARY_KEYS:
+      return isSetPrimaryKeys();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PrimaryKeysResponse)
+      return this.equals((PrimaryKeysResponse)that);
+    return false;
+  }
+
+  public boolean equals(PrimaryKeysResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_primaryKeys = true && this.isSetPrimaryKeys();
+    boolean that_present_primaryKeys = true && that.isSetPrimaryKeys();
+    if (this_present_primaryKeys || that_present_primaryKeys) {
+      if (!(this_present_primaryKeys && that_present_primaryKeys))
+        return false;
+      if (!this.primaryKeys.equals(that.primaryKeys))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_primaryKeys = true && (isSetPrimaryKeys());
+    list.add(present_primaryKeys);
+    if (present_primaryKeys)
+      list.add(primaryKeys);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PrimaryKeysResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPrimaryKeys()).compareTo(other.isSetPrimaryKeys());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrimaryKeys()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.primaryKeys, other.primaryKeys);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PrimaryKeysResponse(");
+    boolean first = true;
+
+    sb.append("primaryKeys:");
+    if (this.primaryKeys == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.primaryKeys);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetPrimaryKeys()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'primaryKeys' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PrimaryKeysResponseStandardSchemeFactory implements SchemeFactory {
+    public PrimaryKeysResponseStandardScheme getScheme() {
+      return new PrimaryKeysResponseStandardScheme();
+    }
+  }
+
+  private static class PrimaryKeysResponseStandardScheme extends StandardScheme<PrimaryKeysResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PrimaryKeysResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PRIMARY_KEYS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list320 = iprot.readListBegin();
+                struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list320.size);
+                SQLPrimaryKey _elem321;
+                for (int _i322 = 0; _i322 < _list320.size; ++_i322)
+                {
+                  _elem321 = new SQLPrimaryKey();
+                  _elem321.read(iprot);
+                  struct.primaryKeys.add(_elem321);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPrimaryKeysIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PrimaryKeysResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.primaryKeys != null) {
+        oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size()));
+          for (SQLPrimaryKey _iter323 : struct.primaryKeys)
+          {
+            _iter323.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PrimaryKeysResponseTupleSchemeFactory implements SchemeFactory {
+    public PrimaryKeysResponseTupleScheme getScheme() {
+      return new PrimaryKeysResponseTupleScheme();
+    }
+  }
+
+  private static class PrimaryKeysResponseTupleScheme extends TupleScheme<PrimaryKeysResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.primaryKeys.size());
+        for (SQLPrimaryKey _iter324 : struct.primaryKeys)
+        {
+          _iter324.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PrimaryKeysResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list325 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.primaryKeys = new ArrayList<SQLPrimaryKey>(_list325.size);
+        SQLPrimaryKey _elem326;
+        for (int _i327 = 0; _i327 < _list325.size; ++_i327)
+        {
+          _elem326 = new SQLPrimaryKey();
+          _elem326.read(iprot);
+          struct.primaryKeys.add(_elem326);
+        }
+      }
+      struct.setPrimaryKeysIsSet(true);
+    }
+  }
+
+}
+
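
The response struct marks its only field, primaryKeys, as REQUIRED, so the
generated validate() above rejects an instance whose list was never assigned.
A small sketch of that behavior (not part of this diff; values are
illustrative):

    import java.util.ArrayList;
    import org.apache.hadoop.hive.metastore.api.PrimaryKeysResponse;
    import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
    import org.apache.thrift.TException;

    public class PrimaryKeysResponseSketch {
      public static void main(String[] args) throws Exception {
        PrimaryKeysResponse empty = new PrimaryKeysResponse();
        try {
          empty.validate(); // primaryKeys unset -> TProtocolException
        } catch (TException expected) {
          System.out.println("rejected: " + expected.getMessage());
        }

        // "Set" means non-null, not non-empty: an empty list validates fine,
        // and addToPrimaryKeys() lazily creates the list when it is null.
        PrimaryKeysResponse ok =
            new PrimaryKeysResponse(new ArrayList<SQLPrimaryKey>());
        ok.addToPrimaryKeys(new SQLPrimaryKey());
        ok.validate();
      }
    }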


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java
new file mode 100644
index 0000000..a4a5218
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountRequest.java
@@ -0,0 +1,598 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NotificationEventsCountRequest implements org.apache.thrift.TBase<NotificationEventsCountRequest, NotificationEventsCountRequest._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEventsCountRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventsCountRequest");
+
+  private static final org.apache.thrift.protocol.TField FROM_EVENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("fromEventId", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NotificationEventsCountRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NotificationEventsCountRequestTupleSchemeFactory());
+  }
+
+  private long fromEventId; // required
+  private String dbName; // required
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FROM_EVENT_ID((short)1, "fromEventId"),
+    DB_NAME((short)2, "dbName"),
+    CAT_NAME((short)3, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FROM_EVENT_ID
+          return FROM_EVENT_ID;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __FROMEVENTID_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FROM_EVENT_ID, new org.apache.thrift.meta_data.FieldMetaData("fromEventId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotificationEventsCountRequest.class, metaDataMap);
+  }
+
+  public NotificationEventsCountRequest() {
+  }
+
+  public NotificationEventsCountRequest(
+    long fromEventId,
+    String dbName)
+  {
+    this();
+    this.fromEventId = fromEventId;
+    setFromEventIdIsSet(true);
+    this.dbName = dbName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NotificationEventsCountRequest(NotificationEventsCountRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.fromEventId = other.fromEventId;
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public NotificationEventsCountRequest deepCopy() {
+    return new NotificationEventsCountRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setFromEventIdIsSet(false);
+    this.fromEventId = 0;
+    this.dbName = null;
+    this.catName = null;
+  }
+
+  public long getFromEventId() {
+    return this.fromEventId;
+  }
+
+  public void setFromEventId(long fromEventId) {
+    this.fromEventId = fromEventId;
+    setFromEventIdIsSet(true);
+  }
+
+  public void unsetFromEventId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __FROMEVENTID_ISSET_ID);
+  }
+
+  /** Returns true if field fromEventId is set (has been assigned a value) and false otherwise */
+  public boolean isSetFromEventId() {
+    return EncodingUtils.testBit(__isset_bitfield, __FROMEVENTID_ISSET_ID);
+  }
+
+  public void setFromEventIdIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __FROMEVENTID_ISSET_ID, value);
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FROM_EVENT_ID:
+      if (value == null) {
+        unsetFromEventId();
+      } else {
+        setFromEventId((Long)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FROM_EVENT_ID:
+      return getFromEventId();
+
+    case DB_NAME:
+      return getDbName();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FROM_EVENT_ID:
+      return isSetFromEventId();
+    case DB_NAME:
+      return isSetDbName();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NotificationEventsCountRequest)
+      return this.equals((NotificationEventsCountRequest)that);
+    return false;
+  }
+
+  public boolean equals(NotificationEventsCountRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_fromEventId = true;
+    boolean that_present_fromEventId = true;
+    if (this_present_fromEventId || that_present_fromEventId) {
+      if (!(this_present_fromEventId && that_present_fromEventId))
+        return false;
+      if (this.fromEventId != that.fromEventId)
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_fromEventId = true;
+    list.add(present_fromEventId);
+    if (present_fromEventId)
+      list.add(fromEventId);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NotificationEventsCountRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetFromEventId()).compareTo(other.isSetFromEventId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFromEventId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fromEventId, other.fromEventId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NotificationEventsCountRequest(");
+    boolean first = true;
+
+    sb.append("fromEventId:");
+    sb.append(this.fromEventId);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetFromEventId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'fromEventId' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NotificationEventsCountRequestStandardSchemeFactory implements SchemeFactory {
+    public NotificationEventsCountRequestStandardScheme getScheme() {
+      return new NotificationEventsCountRequestStandardScheme();
+    }
+  }
+
+  private static class NotificationEventsCountRequestStandardScheme extends StandardScheme<NotificationEventsCountRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventsCountRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FROM_EVENT_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.fromEventId = iprot.readI64();
+              struct.setFromEventIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventsCountRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(FROM_EVENT_ID_FIELD_DESC);
+      oprot.writeI64(struct.fromEventId);
+      oprot.writeFieldEnd();
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NotificationEventsCountRequestTupleSchemeFactory implements SchemeFactory {
+    public NotificationEventsCountRequestTupleScheme getScheme() {
+      return new NotificationEventsCountRequestTupleScheme();
+    }
+  }
+
+  private static class NotificationEventsCountRequestTupleScheme extends TupleScheme<NotificationEventsCountRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventsCountRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.fromEventId);
+      oprot.writeString(struct.dbName);
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatName()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventsCountRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.fromEventId = iprot.readI64();
+      struct.setFromEventIdIsSet(true);
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
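
One detail worth noting in the tuple scheme above: the two required fields
(fromEventId, dbName) are written unconditionally, while the lone optional
field (catName) is announced through a one-bit BitSet and skipped entirely
when unset. A sketch that drives the tuple scheme directly over an in-memory
transport (not part of this diff; the event id and db name are illustrative):

    import org.apache.hadoop.hive.metastore.api.NotificationEventsCountRequest;
    import org.apache.thrift.protocol.TTupleProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class EventsCountRequestSketch {
      public static void main(String[] args) throws Exception {
        NotificationEventsCountRequest req =
            new NotificationEventsCountRequest(42L, "default");
        // catName stays unset, so bit 0 of the optionals BitSet is clear.

        TMemoryBuffer buf = new TMemoryBuffer(64);
        req.write(new TTupleProtocol(buf)); // getScheme() picks TupleScheme

        NotificationEventsCountRequest copy =
            new NotificationEventsCountRequest();
        copy.read(new TTupleProtocol(buf));
        System.out.println(copy.isSetCatName()); // false
      }
    }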

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountResponse.java
new file mode 100644
index 0000000..137b72d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventsCountResponse.java
@@ -0,0 +1,387 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NotificationEventsCountResponse implements org.apache.thrift.TBase<NotificationEventsCountResponse, NotificationEventsCountResponse._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEventsCountResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventsCountResponse");
+
+  private static final org.apache.thrift.protocol.TField EVENTS_COUNT_FIELD_DESC = new org.apache.thrift.protocol.TField("eventsCount", org.apache.thrift.protocol.TType.I64, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NotificationEventsCountResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NotificationEventsCountResponseTupleSchemeFactory());
+  }
+
+  private long eventsCount; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    EVENTS_COUNT((short)1, "eventsCount");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // EVENTS_COUNT
+          return EVENTS_COUNT;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __EVENTSCOUNT_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.EVENTS_COUNT, new org.apache.thrift.meta_data.FieldMetaData("eventsCount", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotificationEventsCountResponse.class, metaDataMap);
+  }
+
+  public NotificationEventsCountResponse() {
+  }
+
+  public NotificationEventsCountResponse(
+    long eventsCount)
+  {
+    this();
+    this.eventsCount = eventsCount;
+    setEventsCountIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NotificationEventsCountResponse(NotificationEventsCountResponse other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.eventsCount = other.eventsCount;
+  }
+
+  public NotificationEventsCountResponse deepCopy() {
+    return new NotificationEventsCountResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    setEventsCountIsSet(false);
+    this.eventsCount = 0;
+  }
+
+  public long getEventsCount() {
+    return this.eventsCount;
+  }
+
+  public void setEventsCount(long eventsCount) {
+    this.eventsCount = eventsCount;
+    setEventsCountIsSet(true);
+  }
+
+  public void unsetEventsCount() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __EVENTSCOUNT_ISSET_ID);
+  }
+
+  /** Returns true if field eventsCount is set (has been assigned a value) and false otherwise */
+  public boolean isSetEventsCount() {
+    return EncodingUtils.testBit(__isset_bitfield, __EVENTSCOUNT_ISSET_ID);
+  }
+
+  public void setEventsCountIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __EVENTSCOUNT_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case EVENTS_COUNT:
+      if (value == null) {
+        unsetEventsCount();
+      } else {
+        setEventsCount((Long)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case EVENTS_COUNT:
+      return getEventsCount();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case EVENTS_COUNT:
+      return isSetEventsCount();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NotificationEventsCountResponse)
+      return this.equals((NotificationEventsCountResponse)that);
+    return false;
+  }
+
+  public boolean equals(NotificationEventsCountResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_eventsCount = true;
+    boolean that_present_eventsCount = true;
+    if (this_present_eventsCount || that_present_eventsCount) {
+      if (!(this_present_eventsCount && that_present_eventsCount))
+        return false;
+      if (this.eventsCount != that.eventsCount)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_eventsCount = true;
+    list.add(present_eventsCount);
+    if (present_eventsCount)
+      list.add(eventsCount);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NotificationEventsCountResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetEventsCount()).compareTo(other.isSetEventsCount());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEventsCount()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.eventsCount, other.eventsCount);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NotificationEventsCountResponse(");
+    boolean first = true;
+
+    sb.append("eventsCount:");
+    sb.append(this.eventsCount);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetEventsCount()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'eventsCount' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NotificationEventsCountResponseStandardSchemeFactory implements SchemeFactory {
+    public NotificationEventsCountResponseStandardScheme getScheme() {
+      return new NotificationEventsCountResponseStandardScheme();
+    }
+  }
+
+  private static class NotificationEventsCountResponseStandardScheme extends StandardScheme<NotificationEventsCountResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventsCountResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // EVENTS_COUNT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.eventsCount = iprot.readI64();
+              struct.setEventsCountIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventsCountResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(EVENTS_COUNT_FIELD_DESC);
+      oprot.writeI64(struct.eventsCount);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NotificationEventsCountResponseTupleSchemeFactory implements SchemeFactory {
+    public NotificationEventsCountResponseTupleScheme getScheme() {
+      return new NotificationEventsCountResponseTupleScheme();
+    }
+  }
+
+  private static class NotificationEventsCountResponseTupleScheme extends TupleScheme<NotificationEventsCountResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventsCountResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.eventsCount);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventsCountResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.eventsCount = iprot.readI64();
+      struct.setEventsCountIsSet(true);
+    }
+  }
+
+}
+
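
Because eventsCount is a required primitive, the generated equals() and
hashCode() above treat it as always present, while compareTo() orders first
on isSetEventsCount() and then on the value itself. A minimal sketch of these
value semantics (not part of this diff; the counts are illustrative):

    import org.apache.hadoop.hive.metastore.api.NotificationEventsCountResponse;

    public class EventsCountResponseSketch {
      public static void main(String[] args) {
        NotificationEventsCountResponse a = new NotificationEventsCountResponse(7L);
        NotificationEventsCountResponse b = new NotificationEventsCountResponse(7L);

        System.out.println(a.equals(b));                  // true: same count
        System.out.println(a.hashCode() == b.hashCode()); // true
        System.out.println(a.compareTo(b));               // 0

        b.setEventsCount(8L);
        System.out.println(a.compareTo(b) < 0); // true: ordered by value
      }
    }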

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
new file mode 100644
index 0000000..2dae2e9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnRequest.java
@@ -0,0 +1,963 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class OpenTxnRequest implements org.apache.thrift.TBase<OpenTxnRequest, OpenTxnRequest._Fields>, java.io.Serializable, Cloneable, Comparable<OpenTxnRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenTxnRequest");
+
+  private static final org.apache.thrift.protocol.TField NUM_TXNS_FIELD_DESC = new org.apache.thrift.protocol.TField("num_txns", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField USER_FIELD_DESC = new org.apache.thrift.protocol.TField("user", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField HOSTNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("hostname", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField AGENT_INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("agentInfo", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField REPL_POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("replPolicy", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField REPL_SRC_TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("replSrcTxnIds", org.apache.thrift.protocol.TType.LIST, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new OpenTxnRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new OpenTxnRequestTupleSchemeFactory());
+  }
+
+  private int num_txns; // required
+  private String user; // required
+  private String hostname; // required
+  private String agentInfo; // optional
+  private String replPolicy; // optional
+  private List<Long> replSrcTxnIds; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NUM_TXNS((short)1, "num_txns"),
+    USER((short)2, "user"),
+    HOSTNAME((short)3, "hostname"),
+    AGENT_INFO((short)4, "agentInfo"),
+    REPL_POLICY((short)5, "replPolicy"),
+    REPL_SRC_TXN_IDS((short)6, "replSrcTxnIds");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NUM_TXNS
+          return NUM_TXNS;
+        case 2: // USER
+          return USER;
+        case 3: // HOSTNAME
+          return HOSTNAME;
+        case 4: // AGENT_INFO
+          return AGENT_INFO;
+        case 5: // REPL_POLICY
+          return REPL_POLICY;
+        case 6: // REPL_SRC_TXN_IDS
+          return REPL_SRC_TXN_IDS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __NUM_TXNS_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.AGENT_INFO,_Fields.REPL_POLICY,_Fields.REPL_SRC_TXN_IDS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NUM_TXNS, new org.apache.thrift.meta_data.FieldMetaData("num_txns", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.USER, new org.apache.thrift.meta_data.FieldMetaData("user", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.HOSTNAME, new org.apache.thrift.meta_data.FieldMetaData("hostname", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.AGENT_INFO, new org.apache.thrift.meta_data.FieldMetaData("agentInfo", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.REPL_POLICY, new org.apache.thrift.meta_data.FieldMetaData("replPolicy", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.REPL_SRC_TXN_IDS, new org.apache.thrift.meta_data.FieldMetaData("replSrcTxnIds", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(OpenTxnRequest.class, metaDataMap);
+  }
+
+  public OpenTxnRequest() {
+    this.agentInfo = "Unknown";
+
+  }
+
+  public OpenTxnRequest(
+    int num_txns,
+    String user,
+    String hostname)
+  {
+    this();
+    this.num_txns = num_txns;
+    setNum_txnsIsSet(true);
+    this.user = user;
+    this.hostname = hostname;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public OpenTxnRequest(OpenTxnRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.num_txns = other.num_txns;
+    if (other.isSetUser()) {
+      this.user = other.user;
+    }
+    if (other.isSetHostname()) {
+      this.hostname = other.hostname;
+    }
+    if (other.isSetAgentInfo()) {
+      this.agentInfo = other.agentInfo;
+    }
+    if (other.isSetReplPolicy()) {
+      this.replPolicy = other.replPolicy;
+    }
+    if (other.isSetReplSrcTxnIds()) {
+      List<Long> __this__replSrcTxnIds = new ArrayList<Long>(other.replSrcTxnIds);
+      this.replSrcTxnIds = __this__replSrcTxnIds;
+    }
+  }
+
+  public OpenTxnRequest deepCopy() {
+    return new OpenTxnRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setNum_txnsIsSet(false);
+    this.num_txns = 0;
+    this.user = null;
+    this.hostname = null;
+    this.agentInfo = "Unknown";
+
+    this.replPolicy = null;
+    this.replSrcTxnIds = null;
+  }
+
+  public int getNum_txns() {
+    return this.num_txns;
+  }
+
+  public void setNum_txns(int num_txns) {
+    this.num_txns = num_txns;
+    setNum_txnsIsSet(true);
+  }
+
+  public void unsetNum_txns() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUM_TXNS_ISSET_ID);
+  }
+
+  /** Returns true if field num_txns is set (has been assigned a value) and false otherwise */
+  public boolean isSetNum_txns() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUM_TXNS_ISSET_ID);
+  }
+
+  public void setNum_txnsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUM_TXNS_ISSET_ID, value);
+  }
+
+  public String getUser() {
+    return this.user;
+  }
+
+  public void setUser(String user) {
+    this.user = user;
+  }
+
+  public void unsetUser() {
+    this.user = null;
+  }
+
+  /** Returns true if field user is set (has been assigned a value) and false otherwise */
+  public boolean isSetUser() {
+    return this.user != null;
+  }
+
+  public void setUserIsSet(boolean value) {
+    if (!value) {
+      this.user = null;
+    }
+  }
+
+  public String getHostname() {
+    return this.hostname;
+  }
+
+  public void setHostname(String hostname) {
+    this.hostname = hostname;
+  }
+
+  public void unsetHostname() {
+    this.hostname = null;
+  }
+
+  /** Returns true if field hostname is set (has been assigned a value) and false otherwise */
+  public boolean isSetHostname() {
+    return this.hostname != null;
+  }
+
+  public void setHostnameIsSet(boolean value) {
+    if (!value) {
+      this.hostname = null;
+    }
+  }
+
+  public String getAgentInfo() {
+    return this.agentInfo;
+  }
+
+  public void setAgentInfo(String agentInfo) {
+    this.agentInfo = agentInfo;
+  }
+
+  public void unsetAgentInfo() {
+    this.agentInfo = null;
+  }
+
+  /** Returns true if field agentInfo is set (has been assigned a value) and false otherwise */
+  public boolean isSetAgentInfo() {
+    return this.agentInfo != null;
+  }
+
+  public void setAgentInfoIsSet(boolean value) {
+    if (!value) {
+      this.agentInfo = null;
+    }
+  }
+
+  public String getReplPolicy() {
+    return this.replPolicy;
+  }
+
+  public void setReplPolicy(String replPolicy) {
+    this.replPolicy = replPolicy;
+  }
+
+  public void unsetReplPolicy() {
+    this.replPolicy = null;
+  }
+
+  /** Returns true if field replPolicy is set (has been assigned a value) and false otherwise */
+  public boolean isSetReplPolicy() {
+    return this.replPolicy != null;
+  }
+
+  public void setReplPolicyIsSet(boolean value) {
+    if (!value) {
+      this.replPolicy = null;
+    }
+  }
+
+  public int getReplSrcTxnIdsSize() {
+    return (this.replSrcTxnIds == null) ? 0 : this.replSrcTxnIds.size();
+  }
+
+  public java.util.Iterator<Long> getReplSrcTxnIdsIterator() {
+    return (this.replSrcTxnIds == null) ? null : this.replSrcTxnIds.iterator();
+  }
+
+  public void addToReplSrcTxnIds(long elem) {
+    if (this.replSrcTxnIds == null) {
+      this.replSrcTxnIds = new ArrayList<Long>();
+    }
+    this.replSrcTxnIds.add(elem);
+  }
+
+  public List<Long> getReplSrcTxnIds() {
+    return this.replSrcTxnIds;
+  }
+
+  public void setReplSrcTxnIds(List<Long> replSrcTxnIds) {
+    this.replSrcTxnIds = replSrcTxnIds;
+  }
+
+  public void unsetReplSrcTxnIds() {
+    this.replSrcTxnIds = null;
+  }
+
+  /** Returns true if field replSrcTxnIds is set (has been assigned a value) and false otherwise */
+  public boolean isSetReplSrcTxnIds() {
+    return this.replSrcTxnIds != null;
+  }
+
+  public void setReplSrcTxnIdsIsSet(boolean value) {
+    if (!value) {
+      this.replSrcTxnIds = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NUM_TXNS:
+      if (value == null) {
+        unsetNum_txns();
+      } else {
+        setNum_txns((Integer)value);
+      }
+      break;
+
+    case USER:
+      if (value == null) {
+        unsetUser();
+      } else {
+        setUser((String)value);
+      }
+      break;
+
+    case HOSTNAME:
+      if (value == null) {
+        unsetHostname();
+      } else {
+        setHostname((String)value);
+      }
+      break;
+
+    case AGENT_INFO:
+      if (value == null) {
+        unsetAgentInfo();
+      } else {
+        setAgentInfo((String)value);
+      }
+      break;
+
+    case REPL_POLICY:
+      if (value == null) {
+        unsetReplPolicy();
+      } else {
+        setReplPolicy((String)value);
+      }
+      break;
+
+    case REPL_SRC_TXN_IDS:
+      if (value == null) {
+        unsetReplSrcTxnIds();
+      } else {
+        setReplSrcTxnIds((List<Long>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NUM_TXNS:
+      return getNum_txns();
+
+    case USER:
+      return getUser();
+
+    case HOSTNAME:
+      return getHostname();
+
+    case AGENT_INFO:
+      return getAgentInfo();
+
+    case REPL_POLICY:
+      return getReplPolicy();
+
+    case REPL_SRC_TXN_IDS:
+      return getReplSrcTxnIds();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NUM_TXNS:
+      return isSetNum_txns();
+    case USER:
+      return isSetUser();
+    case HOSTNAME:
+      return isSetHostname();
+    case AGENT_INFO:
+      return isSetAgentInfo();
+    case REPL_POLICY:
+      return isSetReplPolicy();
+    case REPL_SRC_TXN_IDS:
+      return isSetReplSrcTxnIds();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof OpenTxnRequest)
+      return this.equals((OpenTxnRequest)that);
+    return false;
+  }
+
+  public boolean equals(OpenTxnRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_num_txns = true;
+    boolean that_present_num_txns = true;
+    if (this_present_num_txns || that_present_num_txns) {
+      if (!(this_present_num_txns && that_present_num_txns))
+        return false;
+      if (this.num_txns != that.num_txns)
+        return false;
+    }
+
+    boolean this_present_user = true && this.isSetUser();
+    boolean that_present_user = true && that.isSetUser();
+    if (this_present_user || that_present_user) {
+      if (!(this_present_user && that_present_user))
+        return false;
+      if (!this.user.equals(that.user))
+        return false;
+    }
+
+    boolean this_present_hostname = true && this.isSetHostname();
+    boolean that_present_hostname = true && that.isSetHostname();
+    if (this_present_hostname || that_present_hostname) {
+      if (!(this_present_hostname && that_present_hostname))
+        return false;
+      if (!this.hostname.equals(that.hostname))
+        return false;
+    }
+
+    boolean this_present_agentInfo = true && this.isSetAgentInfo();
+    boolean that_present_agentInfo = true && that.isSetAgentInfo();
+    if (this_present_agentInfo || that_present_agentInfo) {
+      if (!(this_present_agentInfo && that_present_agentInfo))
+        return false;
+      if (!this.agentInfo.equals(that.agentInfo))
+        return false;
+    }
+
+    boolean this_present_replPolicy = true && this.isSetReplPolicy();
+    boolean that_present_replPolicy = true && that.isSetReplPolicy();
+    if (this_present_replPolicy || that_present_replPolicy) {
+      if (!(this_present_replPolicy && that_present_replPolicy))
+        return false;
+      if (!this.replPolicy.equals(that.replPolicy))
+        return false;
+    }
+
+    boolean this_present_replSrcTxnIds = true && this.isSetReplSrcTxnIds();
+    boolean that_present_replSrcTxnIds = true && that.isSetReplSrcTxnIds();
+    if (this_present_replSrcTxnIds || that_present_replSrcTxnIds) {
+      if (!(this_present_replSrcTxnIds && that_present_replSrcTxnIds))
+        return false;
+      if (!this.replSrcTxnIds.equals(that.replSrcTxnIds))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_num_txns = true;
+    list.add(present_num_txns);
+    if (present_num_txns)
+      list.add(num_txns);
+
+    boolean present_user = true && (isSetUser());
+    list.add(present_user);
+    if (present_user)
+      list.add(user);
+
+    boolean present_hostname = true && (isSetHostname());
+    list.add(present_hostname);
+    if (present_hostname)
+      list.add(hostname);
+
+    boolean present_agentInfo = true && (isSetAgentInfo());
+    list.add(present_agentInfo);
+    if (present_agentInfo)
+      list.add(agentInfo);
+
+    boolean present_replPolicy = true && (isSetReplPolicy());
+    list.add(present_replPolicy);
+    if (present_replPolicy)
+      list.add(replPolicy);
+
+    boolean present_replSrcTxnIds = true && (isSetReplSrcTxnIds());
+    list.add(present_replSrcTxnIds);
+    if (present_replSrcTxnIds)
+      list.add(replSrcTxnIds);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(OpenTxnRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetNum_txns()).compareTo(other.isSetNum_txns());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNum_txns()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.num_txns, other.num_txns);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetUser()).compareTo(other.isSetUser());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetUser()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.user, other.user);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetHostname()).compareTo(other.isSetHostname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHostname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hostname, other.hostname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAgentInfo()).compareTo(other.isSetAgentInfo());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAgentInfo()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.agentInfo, other.agentInfo);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetReplPolicy()).compareTo(other.isSetReplPolicy());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetReplPolicy()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replPolicy, other.replPolicy);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetReplSrcTxnIds()).compareTo(other.isSetReplSrcTxnIds());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetReplSrcTxnIds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replSrcTxnIds, other.replSrcTxnIds);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("OpenTxnRequest(");
+    boolean first = true;
+
+    sb.append("num_txns:");
+    sb.append(this.num_txns);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("user:");
+    if (this.user == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.user);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("hostname:");
+    if (this.hostname == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.hostname);
+    }
+    first = false;
+    if (isSetAgentInfo()) {
+      if (!first) sb.append(", ");
+      sb.append("agentInfo:");
+      if (this.agentInfo == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.agentInfo);
+      }
+      first = false;
+    }
+    if (isSetReplPolicy()) {
+      if (!first) sb.append(", ");
+      sb.append("replPolicy:");
+      if (this.replPolicy == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.replPolicy);
+      }
+      first = false;
+    }
+    if (isSetReplSrcTxnIds()) {
+      if (!first) sb.append(", ");
+      sb.append("replSrcTxnIds:");
+      if (this.replSrcTxnIds == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.replSrcTxnIds);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetNum_txns()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'num_txns' is unset! Struct:" + toString());
+    }
+
+    if (!isSetUser()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'user' is unset! Struct:" + toString());
+    }
+
+    if (!isSetHostname()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'hostname' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization does not call the default constructor, so reset the bitfield before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class OpenTxnRequestStandardSchemeFactory implements SchemeFactory {
+    public OpenTxnRequestStandardScheme getScheme() {
+      return new OpenTxnRequestStandardScheme();
+    }
+  }
+
+  private static class OpenTxnRequestStandardScheme extends StandardScheme<OpenTxnRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NUM_TXNS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.num_txns = iprot.readI32();
+              struct.setNum_txnsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // USER
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.user = iprot.readString();
+              struct.setUserIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // HOSTNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.hostname = iprot.readString();
+              struct.setHostnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // AGENT_INFO
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.agentInfo = iprot.readString();
+              struct.setAgentInfoIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // REPL_POLICY
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.replPolicy = iprot.readString();
+              struct.setReplPolicyIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // REPL_SRC_TXN_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list570 = iprot.readListBegin();
+                struct.replSrcTxnIds = new ArrayList<Long>(_list570.size);
+                long _elem571;
+                for (int _i572 = 0; _i572 < _list570.size; ++_i572)
+                {
+                  _elem571 = iprot.readI64();
+                  struct.replSrcTxnIds.add(_elem571);
+                }
+                iprot.readListEnd();
+              }
+              struct.setReplSrcTxnIdsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(NUM_TXNS_FIELD_DESC);
+      oprot.writeI32(struct.num_txns);
+      oprot.writeFieldEnd();
+      if (struct.user != null) {
+        oprot.writeFieldBegin(USER_FIELD_DESC);
+        oprot.writeString(struct.user);
+        oprot.writeFieldEnd();
+      }
+      if (struct.hostname != null) {
+        oprot.writeFieldBegin(HOSTNAME_FIELD_DESC);
+        oprot.writeString(struct.hostname);
+        oprot.writeFieldEnd();
+      }
+      if (struct.agentInfo != null) {
+        if (struct.isSetAgentInfo()) {
+          oprot.writeFieldBegin(AGENT_INFO_FIELD_DESC);
+          oprot.writeString(struct.agentInfo);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.replPolicy != null) {
+        if (struct.isSetReplPolicy()) {
+          oprot.writeFieldBegin(REPL_POLICY_FIELD_DESC);
+          oprot.writeString(struct.replPolicy);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.replSrcTxnIds != null) {
+        if (struct.isSetReplSrcTxnIds()) {
+          oprot.writeFieldBegin(REPL_SRC_TXN_IDS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.replSrcTxnIds.size()));
+            for (long _iter573 : struct.replSrcTxnIds)
+            {
+              oprot.writeI64(_iter573);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class OpenTxnRequestTupleSchemeFactory implements SchemeFactory {
+    public OpenTxnRequestTupleScheme getScheme() {
+      return new OpenTxnRequestTupleScheme();
+    }
+  }
+
+  private static class OpenTxnRequestTupleScheme extends TupleScheme<OpenTxnRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI32(struct.num_txns);
+      oprot.writeString(struct.user);
+      oprot.writeString(struct.hostname);
+      BitSet optionals = new BitSet();
+      if (struct.isSetAgentInfo()) {
+        optionals.set(0);
+      }
+      if (struct.isSetReplPolicy()) {
+        optionals.set(1);
+      }
+      if (struct.isSetReplSrcTxnIds()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetAgentInfo()) {
+        oprot.writeString(struct.agentInfo);
+      }
+      if (struct.isSetReplPolicy()) {
+        oprot.writeString(struct.replPolicy);
+      }
+      if (struct.isSetReplSrcTxnIds()) {
+        {
+          oprot.writeI32(struct.replSrcTxnIds.size());
+          for (long _iter574 : struct.replSrcTxnIds)
+          {
+            oprot.writeI64(_iter574);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, OpenTxnRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.num_txns = iprot.readI32();
+      struct.setNum_txnsIsSet(true);
+      struct.user = iprot.readString();
+      struct.setUserIsSet(true);
+      struct.hostname = iprot.readString();
+      struct.setHostnameIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.agentInfo = iprot.readString();
+        struct.setAgentInfoIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.replPolicy = iprot.readString();
+        struct.setReplPolicyIsSet(true);
+      }
+      if (incoming.get(2)) {
+        {
+          org.apache.thrift.protocol.TList _list575 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+          struct.replSrcTxnIds = new ArrayList<Long>(_list575.size);
+          long _elem576;
+          for (int _i577 = 0; _i577 < _list575.size; ++_i577)
+          {
+            _elem576 = iprot.readI64();
+            struct.replSrcTxnIds.add(_elem576);
+          }
+        }
+        struct.setReplSrcTxnIdsIsSet(true);
+      }
+    }
+  }
+
+}
+
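For orientation, here is a minimal sketch of how a client might populate and round-trip this generated struct. It uses only the constructor, setters, validate(), read(), and write() shown in the generated code above, plus the standard libthrift TMemoryBuffer/TBinaryProtocol classes; the user, host name, agent string, replication policy, and transaction ids are placeholder values, and real callers would go through the metastore client rather than serializing by hand:

    import java.util.Arrays;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;
    import org.apache.hadoop.hive.metastore.api.OpenTxnRequest;

    public class OpenTxnRequestExample {
      public static void main(String[] args) throws Exception {
        // Required fields come through the three-argument constructor;
        // agentInfo defaults to "Unknown" until overridden.
        OpenTxnRequest req = new OpenTxnRequest(2, "hive", "worker-01.example.com");
        req.setAgentInfo("example-client");

        // Optional replication fields are only serialized when set.
        req.setReplPolicy("srcdb.*");
        req.setReplSrcTxnIds(Arrays.asList(101L, 102L));

        // validate() throws TProtocolException if a required field is unset.
        req.validate();

        // Round-trip through an in-memory transport; TBinaryProtocol
        // dispatches to the StandardScheme generated above.
        TMemoryBuffer buf = new TMemoryBuffer(256);
        req.write(new TBinaryProtocol(buf));
        OpenTxnRequest copy = new OpenTxnRequest();
        copy.read(new TBinaryProtocol(buf));
        System.out.println(copy);  // toString() prints only the optionals that are set
      }
    }

Note that the TupleScheme variant instead writes the three required fields unconditionally and encodes the presence of the three optionals in a leading BitSet, which is why both reader and writer must agree on the field order.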

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
new file mode 100644
index 0000000..9e38d6c
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
@@ -0,0 +1,438 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class OpenTxnsResponse implements org.apache.thrift.TBase<OpenTxnsResponse, OpenTxnsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<OpenTxnsResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("OpenTxnsResponse");
+
+  private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_ids", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new OpenTxnsResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new OpenTxnsResponseTupleSchemeFactory());
+  }
+
+  private List<Long> txn_ids; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TXN_IDS((short)1, "txn_ids");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TXN_IDS
+          return TXN_IDS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TXN_IDS, new org.apache.thrift.meta_data.FieldMetaData("txn_ids", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(OpenTxnsResponse.class, metaDataMap);
+  }
+
+  public OpenTxnsResponse() {
+  }
+
+  public OpenTxnsResponse(
+    List<Long> txn_ids)
+  {
+    this();
+    this.txn_ids = txn_ids;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public OpenTxnsResponse(OpenTxnsResponse other) {
+    if (other.isSetTxn_ids()) {
+      List<Long> __this__txn_ids = new ArrayList<Long>(other.txn_ids);
+      this.txn_ids = __this__txn_ids;
+    }
+  }
+
+  public OpenTxnsResponse deepCopy() {
+    return new OpenTxnsResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.txn_ids = null;
+  }
+
+  public int getTxn_idsSize() {
+    return (this.txn_ids == null) ? 0 : this.txn_ids.size();
+  }
+
+  public java.util.Iterator<Long> getTxn_idsIterator() {
+    return (this.txn_ids == null) ? null : this.txn_ids.iterator();
+  }
+
+  public void addToTxn_ids(long elem) {
+    if (this.txn_ids == null) {
+      this.txn_ids = new ArrayList<Long>();
+    }
+    this.txn_ids.add(elem);
+  }
+
+  public List<Long> getTxn_ids() {
+    return this.txn_ids;
+  }
+
+  public void setTxn_ids(List<Long> txn_ids) {
+    this.txn_ids = txn_ids;
+  }
+
+  public void unsetTxn_ids() {
+    this.txn_ids = null;
+  }
+
+  /** Returns true if field txn_ids is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxn_ids() {
+    return this.txn_ids != null;
+  }
+
+  public void setTxn_idsIsSet(boolean value) {
+    if (!value) {
+      this.txn_ids = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TXN_IDS:
+      if (value == null) {
+        unsetTxn_ids();
+      } else {
+        setTxn_ids((List<Long>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TXN_IDS:
+      return getTxn_ids();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TXN_IDS:
+      return isSetTxn_ids();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof OpenTxnsResponse)
+      return this.equals((OpenTxnsResponse)that);
+    return false;
+  }
+
+  public boolean equals(OpenTxnsResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_txn_ids = true && this.isSetTxn_ids();
+    boolean that_present_txn_ids = true && that.isSetTxn_ids();
+    if (this_present_txn_ids || that_present_txn_ids) {
+      if (!(this_present_txn_ids && that_present_txn_ids))
+        return false;
+      if (!this.txn_ids.equals(that.txn_ids))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_txn_ids = true && (isSetTxn_ids());
+    list.add(present_txn_ids);
+    if (present_txn_ids)
+      list.add(txn_ids);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(OpenTxnsResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTxn_ids()).compareTo(other.isSetTxn_ids());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxn_ids()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txn_ids, other.txn_ids);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("OpenTxnsResponse(");
+    boolean first = true;
+
+    sb.append("txn_ids:");
+    if (this.txn_ids == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.txn_ids);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTxn_ids()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'txn_ids' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class OpenTxnsResponseStandardSchemeFactory implements SchemeFactory {
+    public OpenTxnsResponseStandardScheme getScheme() {
+      return new OpenTxnsResponseStandardScheme();
+    }
+  }
+
+  private static class OpenTxnsResponseStandardScheme extends StandardScheme<OpenTxnsResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnsResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TXN_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list578 = iprot.readListBegin();
+                struct.txn_ids = new ArrayList<Long>(_list578.size);
+                long _elem579;
+                for (int _i580 = 0; _i580 < _list578.size; ++_i580)
+                {
+                  _elem579 = iprot.readI64();
+                  struct.txn_ids.add(_elem579);
+                }
+                iprot.readListEnd();
+              }
+              struct.setTxn_idsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnsResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.txn_ids != null) {
+        oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size()));
+          for (long _iter581 : struct.txn_ids)
+          {
+            oprot.writeI64(_iter581);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class OpenTxnsResponseTupleSchemeFactory implements SchemeFactory {
+    public OpenTxnsResponseTupleScheme getScheme() {
+      return new OpenTxnsResponseTupleScheme();
+    }
+  }
+
+  private static class OpenTxnsResponseTupleScheme extends TupleScheme<OpenTxnsResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.txn_ids.size());
+        for (long _iter582 : struct.txn_ids)
+        {
+          oprot.writeI64(_iter582);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list583 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.txn_ids = new ArrayList<Long>(_list583.size);
+        long _elem584;
+        for (int _i585 = 0; _i585 < _list583.size; ++_i585)
+        {
+          _elem584 = iprot.readI64();
+          struct.txn_ids.add(_elem584);
+        }
+      }
+      struct.setTxn_idsIsSet(true);
+    }
+  }
+
+}
+
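Correspondingly, a minimal sketch of consuming the response struct, again restricted to the generated accessors shown above; the transaction ids are placeholder values:

    import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse;

    public class OpenTxnsResponseExample {
      public static void main(String[] args) throws Exception {
        OpenTxnsResponse resp = new OpenTxnsResponse();
        resp.addToTxn_ids(101L);   // lazily creates the backing ArrayList
        resp.addToTxn_ids(102L);
        resp.validate();           // txn_ids is required; throws if still unset

        for (long txnId : resp.getTxn_ids()) {
          System.out.println("opened txn " + txnId);
        }
      }
    }

Because txn_ids is the struct's only field and it is required, the TupleScheme below skips the optional-field BitSet entirely and writes just the list length followed by the i64 elements.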


[81/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
index 0000000,a83017b..187aceb
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php
@@@ -1,0 -1,59951 +1,60225 @@@
+ <?php
+ namespace metastore;
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ use Thrift\Base\TBase;
+ use Thrift\Type\TType;
+ use Thrift\Type\TMessageType;
+ use Thrift\Exception\TException;
+ use Thrift\Exception\TProtocolException;
+ use Thrift\Protocol\TProtocol;
+ use Thrift\Protocol\TBinaryProtocolAccelerated;
+ use Thrift\Exception\TApplicationException;
+ 
+ 
+ /**
+  * This interface is live.
+  */
+ interface ThriftHiveMetastoreIf extends \FacebookServiceIf {
+   /**
+    * @param string $key
+    * @return string
+    * @throws \metastore\MetaException
+    */
+   public function getMetaConf($key);
+   /**
+    * @param string $key
+    * @param string $value
+    * @throws \metastore\MetaException
+    */
+   public function setMetaConf($key, $value);
+   /**
+    * @param \metastore\CreateCatalogRequest $catalog
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function create_catalog(\metastore\CreateCatalogRequest $catalog);
+   /**
+    * @param \metastore\AlterCatalogRequest $rqst
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_catalog(\metastore\AlterCatalogRequest $rqst);
+   /**
+    * @param \metastore\GetCatalogRequest $catName
+    * @return \metastore\GetCatalogResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_catalog(\metastore\GetCatalogRequest $catName);
+   /**
+    * @return \metastore\GetCatalogsResponse
+    * @throws \metastore\MetaException
+    */
+   public function get_catalogs();
+   /**
+    * @param \metastore\DropCatalogRequest $catName
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function drop_catalog(\metastore\DropCatalogRequest $catName);
+   /**
+    * @param \metastore\Database $database
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function create_database(\metastore\Database $database);
+   /**
+    * @param string $name
+    * @return \metastore\Database
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_database($name);
+   /**
+    * @param string $name
+    * @param bool $deleteData
+    * @param bool $cascade
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function drop_database($name, $deleteData, $cascade);
+   /**
+    * @param string $pattern
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function get_databases($pattern);
+   /**
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function get_all_databases();
+   /**
+    * @param string $dbname
+    * @param \metastore\Database $db
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function alter_database($dbname, \metastore\Database $db);
+   /**
+    * @param string $name
+    * @return \metastore\Type
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_type($name);
+   /**
+    * @param \metastore\Type $type
+    * @return bool
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function create_type(\metastore\Type $type);
+   /**
+    * @param string $type
+    * @return bool
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function drop_type($type);
+   /**
+    * @param string $name
+    * @return array
+    * @throws \metastore\MetaException
+    */
+   public function get_type_all($name);
+   /**
+    * @param string $db_name
+    * @param string $table_name
+    * @return \metastore\FieldSchema[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownDBException
+    */
+   public function get_fields($db_name, $table_name);
+   /**
+    * @param string $db_name
+    * @param string $table_name
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\FieldSchema[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownDBException
+    */
+   public function get_fields_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $table_name
+    * @return \metastore\FieldSchema[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownDBException
+    */
+   public function get_schema($db_name, $table_name);
+   /**
+    * @param string $db_name
+    * @param string $table_name
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\FieldSchema[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownDBException
+    */
+   public function get_schema_with_environment_context($db_name, $table_name, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param \metastore\Table $tbl
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function create_table(\metastore\Table $tbl);
+   /**
+    * @param \metastore\Table $tbl
+    * @param \metastore\EnvironmentContext $environment_context
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function create_table_with_environment_context(\metastore\Table $tbl, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param \metastore\Table $tbl
+    * @param \metastore\SQLPrimaryKey[] $primaryKeys
+    * @param \metastore\SQLForeignKey[] $foreignKeys
+    * @param \metastore\SQLUniqueConstraint[] $uniqueConstraints
+    * @param \metastore\SQLNotNullConstraint[] $notNullConstraints
+    * @param \metastore\SQLDefaultConstraint[] $defaultConstraints
+    * @param \metastore\SQLCheckConstraint[] $checkConstraints
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function create_table_with_constraints(\metastore\Table $tbl, array $primaryKeys, array $foreignKeys, array $uniqueConstraints, array $notNullConstraints, array $defaultConstraints, array $checkConstraints);
+   /**
+    * @param \metastore\DropConstraintRequest $req
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_constraint(\metastore\DropConstraintRequest $req);
+   /**
+    * @param \metastore\AddPrimaryKeyRequest $req
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function add_primary_key(\metastore\AddPrimaryKeyRequest $req);
+   /**
+    * @param \metastore\AddForeignKeyRequest $req
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function add_foreign_key(\metastore\AddForeignKeyRequest $req);
+   /**
+    * @param \metastore\AddUniqueConstraintRequest $req
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function add_unique_constraint(\metastore\AddUniqueConstraintRequest $req);
+   /**
+    * @param \metastore\AddNotNullConstraintRequest $req
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function add_not_null_constraint(\metastore\AddNotNullConstraintRequest $req);
+   /**
+    * @param \metastore\AddDefaultConstraintRequest $req
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function add_default_constraint(\metastore\AddDefaultConstraintRequest $req);
+   /**
+    * @param \metastore\AddCheckConstraintRequest $req
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function add_check_constraint(\metastore\AddCheckConstraintRequest $req);
+   /**
+    * @param string $dbname
+    * @param string $name
+    * @param bool $deleteData
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_table($dbname, $name, $deleteData);
+   /**
+    * @param string $dbname
+    * @param string $name
+    * @param bool $deleteData
+    * @param \metastore\EnvironmentContext $environment_context
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_table_with_environment_context($dbname, $name, $deleteData, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $dbName
+    * @param string $tableName
+    * @param string[] $partNames
+    * @throws \metastore\MetaException
+    */
+   public function truncate_table($dbName, $tableName, array $partNames);
+   /**
+    * @param string $db_name
+    * @param string $pattern
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function get_tables($db_name, $pattern);
+   /**
+    * @param string $db_name
+    * @param string $pattern
+    * @param string $tableType
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function get_tables_by_type($db_name, $pattern, $tableType);
+   /**
+    * @param string $db_name
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function get_materialized_views_for_rewriting($db_name);
+   /**
+    * @param string $db_patterns
+    * @param string $tbl_patterns
+    * @param string[] $tbl_types
+    * @return \metastore\TableMeta[]
+    * @throws \metastore\MetaException
+    */
+   public function get_table_meta($db_patterns, $tbl_patterns, array $tbl_types);
+   /**
+    * @param string $db_name
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function get_all_tables($db_name);
+   /**
+    * @param string $dbname
+    * @param string $tbl_name
+    * @return \metastore\Table
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_table($dbname, $tbl_name);
+   /**
+    * @param string $dbname
+    * @param string[] $tbl_names
+    * @return \metastore\Table[]
+    */
+   public function get_table_objects_by_name($dbname, array $tbl_names);
+   /**
+    * @param \metastore\GetTableRequest $req
+    * @return \metastore\GetTableResult
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_table_req(\metastore\GetTableRequest $req);
+   /**
+    * @param \metastore\GetTablesRequest $req
+    * @return \metastore\GetTablesResult
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\UnknownDBException
+    */
+   public function get_table_objects_by_name_req(\metastore\GetTablesRequest $req);
+   /**
+    * @param string $dbname
+    * @param string[] $tbl_names
+    * @return array
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\UnknownDBException
+    */
+   public function get_materialization_invalidation_info($dbname, array $tbl_names);
+   /**
+    * @param string $catName
+    * @param string $dbname
+    * @param string $tbl_name
+    * @param \metastore\CreationMetadata $creation_metadata
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\UnknownDBException
+    */
+   public function update_creation_metadata($catName, $dbname, $tbl_name, \metastore\CreationMetadata $creation_metadata);
+   /**
+    * @param string $dbname
+    * @param string $filter
+    * @param int $max_tables
+    * @return string[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\UnknownDBException
+    */
+   public function get_table_names_by_filter($dbname, $filter, $max_tables);
+   /**
+    * @param string $dbname
+    * @param string $tbl_name
+    * @param \metastore\Table $new_tbl
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_table($dbname, $tbl_name, \metastore\Table $new_tbl);
+   /**
+    * @param string $dbname
+    * @param string $tbl_name
+    * @param \metastore\Table $new_tbl
+    * @param \metastore\EnvironmentContext $environment_context
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_table_with_environment_context($dbname, $tbl_name, \metastore\Table $new_tbl, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $dbname
+    * @param string $tbl_name
+    * @param \metastore\Table $new_tbl
+    * @param bool $cascade
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_table_with_cascade($dbname, $tbl_name, \metastore\Table $new_tbl, $cascade);
+   /**
+    * @param \metastore\Partition $new_part
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function add_partition(\metastore\Partition $new_part);
+   /**
+    * @param \metastore\Partition $new_part
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function add_partition_with_environment_context(\metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param \metastore\Partition[] $new_parts
+    * @return int
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function add_partitions(array $new_parts);
+   /**
+    * @param \metastore\PartitionSpec[] $new_parts
+    * @return int
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function add_partitions_pspec(array $new_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function append_partition($db_name, $tbl_name, array $part_vals);
+   /**
+    * @param \metastore\AddPartitionsRequest $request
+    * @return \metastore\AddPartitionsResult
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function add_partitions_req(\metastore\AddPartitionsRequest $request);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function append_partition_with_environment_context($db_name, $tbl_name, array $part_vals, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function append_partition_by_name($db_name, $tbl_name, $part_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return \metastore\Partition
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function append_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param bool $deleteData
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_partition($db_name, $tbl_name, array $part_vals, $deleteData);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param bool $deleteData
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_partition_with_environment_context($db_name, $tbl_name, array $part_vals, $deleteData, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param bool $deleteData
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_partition_by_name($db_name, $tbl_name, $part_name, $deleteData);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param bool $deleteData
+    * @param \metastore\EnvironmentContext $environment_context
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_partition_by_name_with_environment_context($db_name, $tbl_name, $part_name, $deleteData, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param \metastore\DropPartitionsRequest $req
+    * @return \metastore\DropPartitionsResult
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_partitions_req(\metastore\DropPartitionsRequest $req);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @return \metastore\Partition
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partition($db_name, $tbl_name, array $part_vals);
+   /**
+    * @param array $partitionSpecs
+    * @param string $source_db
+    * @param string $source_table_name
+    * @param string $dest_db
+    * @param string $dest_table_name
+    * @return \metastore\Partition
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\InvalidInputException
+    */
+   public function exchange_partition(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
+   /**
+    * @param array $partitionSpecs
+    * @param string $source_db
+    * @param string $source_table_name
+    * @param string $dest_db
+    * @param string $dest_table_name
+    * @return \metastore\Partition[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\InvalidInputException
+    */
+   public function exchange_partitions(array $partitionSpecs, $source_db, $source_table_name, $dest_db, $dest_table_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return \metastore\Partition
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partition_with_auth($db_name, $tbl_name, array $part_vals, $user_name, array $group_names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @return \metastore\Partition
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partition_by_name($db_name, $tbl_name, $part_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_parts
+    * @return \metastore\Partition[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_partitions($db_name, $tbl_name, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_parts
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return \metastore\Partition[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_partitions_with_auth($db_name, $tbl_name, $max_parts, $user_name, array $group_names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_parts
+    * @return \metastore\PartitionSpec[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_partitions_pspec($db_name, $tbl_name, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param int $max_parts
+    * @return string[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_partition_names($db_name, $tbl_name, $max_parts);
+   /**
+    * @param \metastore\PartitionValuesRequest $request
+    * @return \metastore\PartitionValuesResponse
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partition_values(\metastore\PartitionValuesRequest $request);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param int $max_parts
+    * @return \metastore\Partition[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partitions_ps($db_name, $tbl_name, array $part_vals, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param int $max_parts
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return \metastore\Partition[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_partitions_ps_with_auth($db_name, $tbl_name, array $part_vals, $max_parts, $user_name, array $group_names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param int $max_parts
+    * @return string[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partition_names_ps($db_name, $tbl_name, array $part_vals, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $filter
+    * @param int $max_parts
+    * @return \metastore\Partition[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partitions_by_filter($db_name, $tbl_name, $filter, $max_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $filter
+    * @param int $max_parts
+    * @return \metastore\PartitionSpec[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_part_specs_by_filter($db_name, $tbl_name, $filter, $max_parts);
+   /**
+    * @param \metastore\PartitionsByExprRequest $req
+    * @return \metastore\PartitionsByExprResult
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partitions_by_expr(\metastore\PartitionsByExprRequest $req);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $filter
+    * @return int
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_num_partitions_by_filter($db_name, $tbl_name, $filter);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $names
+    * @return \metastore\Partition[]
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_partitions_by_names($db_name, $tbl_name, array $names);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param \metastore\Partition $new_part
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_partition($db_name, $tbl_name, \metastore\Partition $new_part);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param \metastore\Partition[] $new_parts
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_partitions($db_name, $tbl_name, array $new_parts);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param \metastore\Partition[] $new_parts
+    * @param \metastore\EnvironmentContext $environment_context
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_partitions_with_environment_context($db_name, $tbl_name, array $new_parts, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param \metastore\AlterPartitionsRequest $req
+    * @return \metastore\AlterPartitionsResponse
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_partitions_with_environment_context_req(\metastore\AlterPartitionsRequest $req);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param \metastore\Partition $new_part
+    * @param \metastore\EnvironmentContext $environment_context
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_partition_with_environment_context($db_name, $tbl_name, \metastore\Partition $new_part, \metastore\EnvironmentContext $environment_context);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string[] $part_vals
+    * @param \metastore\Partition $new_part
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function rename_partition($db_name, $tbl_name, array $part_vals, \metastore\Partition $new_part);
+   /**
+    * @param string[] $part_vals
+    * @param bool $throw_exception
+    * @return bool
+    * @throws \metastore\MetaException
+    */
+   public function partition_name_has_valid_characters(array $part_vals, $throw_exception);
+   /**
+    * @param string $name
+    * @param string $defaultValue
+    * @return string
+    * @throws \metastore\ConfigValSecurityException
+    */
+   public function get_config_value($name, $defaultValue);
+   /**
+    * @param string $part_name
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function partition_name_to_vals($part_name);
+   /**
+    * @param string $part_name
+    * @return array
+    * @throws \metastore\MetaException
+    */
+   public function partition_name_to_spec($part_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param array $part_vals
+    * @param int $eventType
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\UnknownDBException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownPartitionException
+    * @throws \metastore\InvalidPartitionException
+    */
+   public function markPartitionForEvent($db_name, $tbl_name, array $part_vals, $eventType);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param array $part_vals
+    * @param int $eventType
+    * @return bool
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\UnknownDBException
+    * @throws \metastore\UnknownTableException
+    * @throws \metastore\UnknownPartitionException
+    * @throws \metastore\InvalidPartitionException
+    */
+   public function isPartitionMarkedForEvent($db_name, $tbl_name, array $part_vals, $eventType);
+   /**
+    * @param \metastore\PrimaryKeysRequest $request
+    * @return \metastore\PrimaryKeysResponse
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_primary_keys(\metastore\PrimaryKeysRequest $request);
+   /**
+    * @param \metastore\ForeignKeysRequest $request
+    * @return \metastore\ForeignKeysResponse
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_foreign_keys(\metastore\ForeignKeysRequest $request);
+   /**
+    * @param \metastore\UniqueConstraintsRequest $request
+    * @return \metastore\UniqueConstraintsResponse
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_unique_constraints(\metastore\UniqueConstraintsRequest $request);
+   /**
+    * @param \metastore\NotNullConstraintsRequest $request
+    * @return \metastore\NotNullConstraintsResponse
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_not_null_constraints(\metastore\NotNullConstraintsRequest $request);
+   /**
+    * @param \metastore\DefaultConstraintsRequest $request
+    * @return \metastore\DefaultConstraintsResponse
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_default_constraints(\metastore\DefaultConstraintsRequest $request);
+   /**
+    * @param \metastore\CheckConstraintsRequest $request
+    * @return \metastore\CheckConstraintsResponse
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_check_constraints(\metastore\CheckConstraintsRequest $request);
+   /**
+    * @param \metastore\ColumnStatistics $stats_obj
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    */
+   public function update_table_column_statistics(\metastore\ColumnStatistics $stats_obj);
+   /**
+    * @param \metastore\ColumnStatistics $stats_obj
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    */
+   public function update_partition_column_statistics(\metastore\ColumnStatistics $stats_obj);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $col_name
+    * @return \metastore\ColumnStatistics
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    * @throws \metastore\InvalidObjectException
+    */
+   public function get_table_column_statistics($db_name, $tbl_name, $col_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param string $col_name
+    * @return \metastore\ColumnStatistics
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    * @throws \metastore\InvalidObjectException
+    */
+   public function get_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name);
+   /**
+    * @param \metastore\TableStatsRequest $request
+    * @return \metastore\TableStatsResult
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_table_statistics_req(\metastore\TableStatsRequest $request);
+   /**
+    * @param \metastore\PartitionsStatsRequest $request
+    * @return \metastore\PartitionsStatsResult
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_partitions_statistics_req(\metastore\PartitionsStatsRequest $request);
+   /**
+    * @param \metastore\PartitionsStatsRequest $request
+    * @return \metastore\AggrStats
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_aggr_stats_for(\metastore\PartitionsStatsRequest $request);
+   /**
+    * @param \metastore\SetPartitionsStatsRequest $request
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidInputException
+    */
+   public function set_aggr_stats_for(\metastore\SetPartitionsStatsRequest $request);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $part_name
+    * @param string $col_name
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\InvalidInputException
+    */
+   public function delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name);
+   /**
+    * @param string $db_name
+    * @param string $tbl_name
+    * @param string $col_name
+    * @return bool
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\InvalidInputException
+    */
+   public function delete_table_column_statistics($db_name, $tbl_name, $col_name);
+   /**
+    * @param \metastore\Function $func
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function create_function(\metastore\Function $func);
+   /**
+    * @param string $dbName
+    * @param string $funcName
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_function($dbName, $funcName);
+   /**
+    * @param string $dbName
+    * @param string $funcName
+    * @param \metastore\Function $newFunc
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_function($dbName, $funcName, \metastore\Function $newFunc);
+   /**
+    * @param string $dbName
+    * @param string $pattern
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function get_functions($dbName, $pattern);
+   /**
+    * @param string $dbName
+    * @param string $funcName
+    * @return \metastore\Function
+    * @throws \metastore\MetaException
+    * @throws \metastore\NoSuchObjectException
+    */
+   public function get_function($dbName, $funcName);
+   /**
+    * @return \metastore\GetAllFunctionsResponse
+    * @throws \metastore\MetaException
+    */
+   public function get_all_functions();
+   /**
+    * @param \metastore\Role $role
+    * @return bool
+    * @throws \metastore\MetaException
+    */
+   public function create_role(\metastore\Role $role);
+   /**
+    * @param string $role_name
+    * @return bool
+    * @throws \metastore\MetaException
+    */
+   public function drop_role($role_name);
+   /**
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function get_role_names();
+   /**
+    * @param string $role_name
+    * @param string $principal_name
+    * @param int $principal_type
+    * @param string $grantor
+    * @param int $grantorType
+    * @param bool $grant_option
+    * @return bool
+    * @throws \metastore\MetaException
+    */
+   public function grant_role($role_name, $principal_name, $principal_type, $grantor, $grantorType, $grant_option);
+   /**
+    * @param string $role_name
+    * @param string $principal_name
+    * @param int $principal_type
+    * @return bool
+    * @throws \metastore\MetaException
+    */
+   public function revoke_role($role_name, $principal_name, $principal_type);
+   /**
+    * @param string $principal_name
+    * @param int $principal_type
+    * @return \metastore\Role[]
+    * @throws \metastore\MetaException
+    */
+   public function list_roles($principal_name, $principal_type);
+   /**
+    * @param \metastore\GrantRevokeRoleRequest $request
+    * @return \metastore\GrantRevokeRoleResponse
+    * @throws \metastore\MetaException
+    */
+   public function grant_revoke_role(\metastore\GrantRevokeRoleRequest $request);
+   /**
+    * @param \metastore\GetPrincipalsInRoleRequest $request
+    * @return \metastore\GetPrincipalsInRoleResponse
+    * @throws \metastore\MetaException
+    */
+   public function get_principals_in_role(\metastore\GetPrincipalsInRoleRequest $request);
+   /**
+    * @param \metastore\GetRoleGrantsForPrincipalRequest $request
+    * @return \metastore\GetRoleGrantsForPrincipalResponse
+    * @throws \metastore\MetaException
+    */
+   public function get_role_grants_for_principal(\metastore\GetRoleGrantsForPrincipalRequest $request);
+   /**
+    * @param \metastore\HiveObjectRef $hiveObject
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return \metastore\PrincipalPrivilegeSet
+    * @throws \metastore\MetaException
+    */
+   public function get_privilege_set(\metastore\HiveObjectRef $hiveObject, $user_name, array $group_names);
+   /**
+    * @param string $principal_name
+    * @param int $principal_type
+    * @param \metastore\HiveObjectRef $hiveObject
+    * @return \metastore\HiveObjectPrivilege[]
+    * @throws \metastore\MetaException
+    */
+   public function list_privileges($principal_name, $principal_type, \metastore\HiveObjectRef $hiveObject);
+   /**
+    * @param \metastore\PrivilegeBag $privileges
+    * @return bool
+    * @throws \metastore\MetaException
+    */
+   public function grant_privileges(\metastore\PrivilegeBag $privileges);
+   /**
+    * @param \metastore\PrivilegeBag $privileges
+    * @return bool
+    * @throws \metastore\MetaException
+    */
+   public function revoke_privileges(\metastore\PrivilegeBag $privileges);
+   /**
+    * @param \metastore\GrantRevokePrivilegeRequest $request
+    * @return \metastore\GrantRevokePrivilegeResponse
+    * @throws \metastore\MetaException
+    */
+   public function grant_revoke_privileges(\metastore\GrantRevokePrivilegeRequest $request);
+   /**
+    * @param \metastore\HiveObjectRef $objToRefresh
+    * @param string $authorizer
+    * @param \metastore\GrantRevokePrivilegeRequest $grantRequest
+    * @return \metastore\GrantRevokePrivilegeResponse
+    * @throws \metastore\MetaException
+    */
+   public function refresh_privileges(\metastore\HiveObjectRef $objToRefresh, $authorizer, \metastore\GrantRevokePrivilegeRequest $grantRequest);
+   /**
+    * @param string $user_name
+    * @param string[] $group_names
+    * @return string[]
+    * @throws \metastore\MetaException
+    */
+   public function set_ugi($user_name, array $group_names);
+   /**
+    * @param string $token_owner
+    * @param string $renewer_kerberos_principal_name
+    * @return string
+    * @throws \metastore\MetaException
+    */
+   public function get_delegation_token($token_owner, $renewer_kerberos_principal_name);
+   /**
+    * @param string $token_str_form
+    * @return int
+    * @throws \metastore\MetaException
+    */
+   public function renew_delegation_token($token_str_form);
+   /**
+    * @param string $token_str_form
+    * @throws \metastore\MetaException
+    */
+   public function cancel_delegation_token($token_str_form);
+   /**
+    * @param string $token_identifier
+    * @param string $delegation_token
+    * @return bool
+    */
+   public function add_token($token_identifier, $delegation_token);
+   /**
+    * @param string $token_identifier
+    * @return bool
+    */
+   public function remove_token($token_identifier);
+   /**
+    * @param string $token_identifier
+    * @return string
+    */
+   public function get_token($token_identifier);
+   /**
+    * @return string[]
+    */
+   public function get_all_token_identifiers();
+   /**
+    * @param string $key
+    * @return int
+    * @throws \metastore\MetaException
+    */
+   public function add_master_key($key);
+   /**
+    * @param int $seq_number
+    * @param string $key
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function update_master_key($seq_number, $key);
+   /**
+    * @param int $key_seq
+    * @return bool
+    */
+   public function remove_master_key($key_seq);
+   /**
+    * @return string[]
+    */
+   public function get_master_keys();
+   /**
+    * @return \metastore\GetOpenTxnsResponse
+    */
+   public function get_open_txns();
+   /**
+    * @return \metastore\GetOpenTxnsInfoResponse
+    */
+   public function get_open_txns_info();
+   /**
+    * @param \metastore\OpenTxnRequest $rqst
+    * @return \metastore\OpenTxnsResponse
+    */
+   public function open_txns(\metastore\OpenTxnRequest $rqst);
+   /**
+    * @param \metastore\AbortTxnRequest $rqst
+    * @throws \metastore\NoSuchTxnException
+    */
+   public function abort_txn(\metastore\AbortTxnRequest $rqst);
+   /**
+    * @param \metastore\AbortTxnsRequest $rqst
+    * @throws \metastore\NoSuchTxnException
+    */
+   public function abort_txns(\metastore\AbortTxnsRequest $rqst);
+   /**
+    * @param \metastore\CommitTxnRequest $rqst
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    */
+   public function commit_txn(\metastore\CommitTxnRequest $rqst);
+   /**
+    * @param \metastore\ReplTblWriteIdStateRequest $rqst
+    */
+   public function repl_tbl_writeid_state(\metastore\ReplTblWriteIdStateRequest $rqst);
+   /**
+    * @param \metastore\GetValidWriteIdsRequest $rqst
+    * @return \metastore\GetValidWriteIdsResponse
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\MetaException
+    */
+   public function get_valid_write_ids(\metastore\GetValidWriteIdsRequest $rqst);
+   /**
+    * @param \metastore\AllocateTableWriteIdsRequest $rqst
+    * @return \metastore\AllocateTableWriteIdsResponse
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    * @throws \metastore\MetaException
+    */
+   public function allocate_table_write_ids(\metastore\AllocateTableWriteIdsRequest $rqst);
+   /**
+    * @param \metastore\LockRequest $rqst
+    * @return \metastore\LockResponse
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    */
+   public function lock(\metastore\LockRequest $rqst);
+   /**
+    * @param \metastore\CheckLockRequest $rqst
+    * @return \metastore\LockResponse
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    * @throws \metastore\NoSuchLockException
+    */
+   public function check_lock(\metastore\CheckLockRequest $rqst);
+   /**
+    * @param \metastore\UnlockRequest $rqst
+    * @throws \metastore\NoSuchLockException
+    * @throws \metastore\TxnOpenException
+    */
+   public function unlock(\metastore\UnlockRequest $rqst);
+   /**
+    * @param \metastore\ShowLocksRequest $rqst
+    * @return \metastore\ShowLocksResponse
+    */
+   public function show_locks(\metastore\ShowLocksRequest $rqst);
+   /**
+    * @param \metastore\HeartbeatRequest $ids
+    * @throws \metastore\NoSuchLockException
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    */
+   public function heartbeat(\metastore\HeartbeatRequest $ids);
+   /**
+    * @param \metastore\HeartbeatTxnRangeRequest $txns
+    * @return \metastore\HeartbeatTxnRangeResponse
+    */
+   public function heartbeat_txn_range(\metastore\HeartbeatTxnRangeRequest $txns);
+   /**
+    * @param \metastore\CompactionRequest $rqst
+    */
+   public function compact(\metastore\CompactionRequest $rqst);
+   /**
+    * @param \metastore\CompactionRequest $rqst
+    * @return \metastore\CompactionResponse
+    */
+   public function compact2(\metastore\CompactionRequest $rqst);
+   /**
+    * @param \metastore\ShowCompactRequest $rqst
+    * @return \metastore\ShowCompactResponse
+    */
+   public function show_compact(\metastore\ShowCompactRequest $rqst);
+   /**
+    * @param \metastore\AddDynamicPartitions $rqst
+    * @throws \metastore\NoSuchTxnException
+    * @throws \metastore\TxnAbortedException
+    */
+   public function add_dynamic_partitions(\metastore\AddDynamicPartitions $rqst);
+   /**
+    * @param \metastore\NotificationEventRequest $rqst
+    * @return \metastore\NotificationEventResponse
+    */
+   public function get_next_notification(\metastore\NotificationEventRequest $rqst);
+   /**
+    * @return \metastore\CurrentNotificationEventId
+    */
+   public function get_current_notificationEventId();
+   /**
+    * @param \metastore\NotificationEventsCountRequest $rqst
+    * @return \metastore\NotificationEventsCountResponse
+    */
+   public function get_notification_events_count(\metastore\NotificationEventsCountRequest $rqst);
+   /**
+    * @param \metastore\FireEventRequest $rqst
+    * @return \metastore\FireEventResponse
+    */
+   public function fire_listener_event(\metastore\FireEventRequest $rqst);
+   /**
+    */
+   public function flushCache();
+   /**
+    * @param \metastore\WriteNotificationLogRequest $rqst
+    * @return \metastore\WriteNotificationLogResponse
+    */
+   public function add_write_notification_log(\metastore\WriteNotificationLogRequest $rqst);
+   /**
+    * @param \metastore\CmRecycleRequest $request
+    * @return \metastore\CmRecycleResponse
+    * @throws \metastore\MetaException
+    */
+   public function cm_recycle(\metastore\CmRecycleRequest $request);
+   /**
+    * @param \metastore\GetFileMetadataByExprRequest $req
+    * @return \metastore\GetFileMetadataByExprResult
+    */
+   public function get_file_metadata_by_expr(\metastore\GetFileMetadataByExprRequest $req);
+   /**
+    * @param \metastore\GetFileMetadataRequest $req
+    * @return \metastore\GetFileMetadataResult
+    */
+   public function get_file_metadata(\metastore\GetFileMetadataRequest $req);
+   /**
+    * @param \metastore\PutFileMetadataRequest $req
+    * @return \metastore\PutFileMetadataResult
+    */
+   public function put_file_metadata(\metastore\PutFileMetadataRequest $req);
+   /**
+    * @param \metastore\ClearFileMetadataRequest $req
+    * @return \metastore\ClearFileMetadataResult
+    */
+   public function clear_file_metadata(\metastore\ClearFileMetadataRequest $req);
+   /**
+    * @param \metastore\CacheFileMetadataRequest $req
+    * @return \metastore\CacheFileMetadataResult
+    */
+   public function cache_file_metadata(\metastore\CacheFileMetadataRequest $req);
+   /**
+    * @return string
+    * @throws \metastore\MetaException
+    */
+   public function get_metastore_db_uuid();
+   /**
+    * @param \metastore\WMCreateResourcePlanRequest $request
+    * @return \metastore\WMCreateResourcePlanResponse
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function create_resource_plan(\metastore\WMCreateResourcePlanRequest $request);
+   /**
+    * @param \metastore\WMGetResourcePlanRequest $request
+    * @return \metastore\WMGetResourcePlanResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_resource_plan(\metastore\WMGetResourcePlanRequest $request);
+   /**
+    * @param \metastore\WMGetActiveResourcePlanRequest $request
+    * @return \metastore\WMGetActiveResourcePlanResponse
+    * @throws \metastore\MetaException
+    */
+   public function get_active_resource_plan(\metastore\WMGetActiveResourcePlanRequest $request);
+   /**
+    * @param \metastore\WMGetAllResourcePlanRequest $request
+    * @return \metastore\WMGetAllResourcePlanResponse
+    * @throws \metastore\MetaException
+    */
+   public function get_all_resource_plans(\metastore\WMGetAllResourcePlanRequest $request);
+   /**
+    * @param \metastore\WMAlterResourcePlanRequest $request
+    * @return \metastore\WMAlterResourcePlanResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function alter_resource_plan(\metastore\WMAlterResourcePlanRequest $request);
+   /**
+    * @param \metastore\WMValidateResourcePlanRequest $request
+    * @return \metastore\WMValidateResourcePlanResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function validate_resource_plan(\metastore\WMValidateResourcePlanRequest $request);
+   /**
+    * @param \metastore\WMDropResourcePlanRequest $request
+    * @return \metastore\WMDropResourcePlanResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function drop_resource_plan(\metastore\WMDropResourcePlanRequest $request);
+   /**
+    * @param \metastore\WMCreateTriggerRequest $request
+    * @return \metastore\WMCreateTriggerResponse
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function create_wm_trigger(\metastore\WMCreateTriggerRequest $request);
+   /**
+    * @param \metastore\WMAlterTriggerRequest $request
+    * @return \metastore\WMAlterTriggerResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function alter_wm_trigger(\metastore\WMAlterTriggerRequest $request);
+   /**
+    * @param \metastore\WMDropTriggerRequest $request
+    * @return \metastore\WMDropTriggerResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function drop_wm_trigger(\metastore\WMDropTriggerRequest $request);
+   /**
+    * @param \metastore\WMGetTriggersForResourePlanRequest $request
+    * @return \metastore\WMGetTriggersForResourePlanResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_triggers_for_resourceplan(\metastore\WMGetTriggersForResourePlanRequest $request);
+   /**
+    * @param \metastore\WMCreatePoolRequest $request
+    * @return \metastore\WMCreatePoolResponse
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function create_wm_pool(\metastore\WMCreatePoolRequest $request);
+   /**
+    * @param \metastore\WMAlterPoolRequest $request
+    * @return \metastore\WMAlterPoolResponse
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function alter_wm_pool(\metastore\WMAlterPoolRequest $request);
+   /**
+    * @param \metastore\WMDropPoolRequest $request
+    * @return \metastore\WMDropPoolResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function drop_wm_pool(\metastore\WMDropPoolRequest $request);
+   /**
+    * @param \metastore\WMCreateOrUpdateMappingRequest $request
+    * @return \metastore\WMCreateOrUpdateMappingResponse
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function create_or_update_wm_mapping(\metastore\WMCreateOrUpdateMappingRequest $request);
+   /**
+    * @param \metastore\WMDropMappingRequest $request
+    * @return \metastore\WMDropMappingResponse
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function drop_wm_mapping(\metastore\WMDropMappingRequest $request);
+   /**
+    * @param \metastore\WMCreateOrDropTriggerToPoolMappingRequest $request
+    * @return \metastore\WMCreateOrDropTriggerToPoolMappingResponse
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidObjectException
+    * @throws \metastore\MetaException
+    */
+   public function create_or_drop_wm_trigger_to_pool_mapping(\metastore\WMCreateOrDropTriggerToPoolMappingRequest $request);
+   /**
+    * @param \metastore\ISchema $schema
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function create_ischema(\metastore\ISchema $schema);
+   /**
+    * @param \metastore\AlterISchemaRequest $rqst
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function alter_ischema(\metastore\AlterISchemaRequest $rqst);
+   /**
+    * @param \metastore\ISchemaName $name
+    * @return \metastore\ISchema
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_ischema(\metastore\ISchemaName $name);
+   /**
+    * @param \metastore\ISchemaName $name
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function drop_ischema(\metastore\ISchemaName $name);
+   /**
+    * @param \metastore\SchemaVersion $schemaVersion
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function add_schema_version(\metastore\SchemaVersion $schemaVersion);
+   /**
+    * @param \metastore\SchemaVersionDescriptor $schemaVersion
+    * @return \metastore\SchemaVersion
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_schema_version(\metastore\SchemaVersionDescriptor $schemaVersion);
+   /**
+    * @param \metastore\ISchemaName $schemaName
+    * @return \metastore\SchemaVersion
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_schema_latest_version(\metastore\ISchemaName $schemaName);
+   /**
+    * @param \metastore\ISchemaName $schemaName
+    * @return \metastore\SchemaVersion[]
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_schema_all_versions(\metastore\ISchemaName $schemaName);
+   /**
+    * @param \metastore\SchemaVersionDescriptor $schemaVersion
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function drop_schema_version(\metastore\SchemaVersionDescriptor $schemaVersion);
+   /**
+    * @param \metastore\FindSchemasByColsRqst $rqst
+    * @return \metastore\FindSchemasByColsResp
+    * @throws \metastore\MetaException
+    */
+   public function get_schemas_by_cols(\metastore\FindSchemasByColsRqst $rqst);
+   /**
+    * @param \metastore\MapSchemaVersionToSerdeRequest $rqst
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function map_schema_version_to_serde(\metastore\MapSchemaVersionToSerdeRequest $rqst);
+   /**
+    * @param \metastore\SetSchemaVersionStateRequest $rqst
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\InvalidOperationException
+    * @throws \metastore\MetaException
+    */
+   public function set_schema_version_state(\metastore\SetSchemaVersionStateRequest $rqst);
+   /**
+    * @param \metastore\SerDeInfo $serde
+    * @throws \metastore\AlreadyExistsException
+    * @throws \metastore\MetaException
+    */
+   public function add_serde(\metastore\SerDeInfo $serde);
+   /**
+    * @param \metastore\GetSerdeRequest $rqst
+    * @return \metastore\SerDeInfo
+    * @throws \metastore\NoSuchObjectException
+    * @throws \metastore\MetaException
+    */
+   public function get_serde(\metastore\GetSerdeRequest $rqst);
+   /**
+    * @param string $dbName
+    * @param string $tableName
+    * @param int $txnId
+    * @return \metastore\LockResponse
+    */
+   public function get_lock_materialization_rebuild($dbName, $tableName, $txnId);
+   /**
+    * @param string $dbName
+    * @param string $tableName
+    * @param int $txnId
+    * @return bool
+    */
+   public function heartbeat_lock_materialization_rebuild($dbName, $tableName, $txnId);
+   /**
+    * @param \metastore\RuntimeStat $stat
+    * @throws \metastore\MetaException
+    */
+   public function add_runtime_stats(\metastore\RuntimeStat $stat);
+   /**
+    * @param \metastore\GetRuntimeStatsRequest $rqst
+    * @return \metastore\RuntimeStat[]
+    * @throws \metastore\MetaException
+    */
+   public function get_runtime_stats(\metastore\GetRuntimeStatsRequest $rqst);
+ }
+ 
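+ // A minimal usage sketch for the client below, assuming the Thrift PHP
+ // runtime's TSocket, TBufferedTransport and TBinaryProtocol classes are
+ // loadable (namespaced as Thrift\Transport\* and Thrift\Protocol\* in newer
+ // runtimes) and a metastore listening on its default port 9083:
+ //
+ //   $socket    = new TSocket('metastore-host', 9083);
+ //   $transport = new TBufferedTransport($socket);
+ //   $protocol  = new TBinaryProtocol($transport);
+ //   $client    = new \metastore\ThriftHiveMetastoreClient($protocol);
+ //   $transport->open();
+ //   $value = $client->getMetaConf('metastore.try.direct.sql'); // example key
+ //   $transport->close();
+ 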
+ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metastore\ThriftHiveMetastoreIf {
+   public function __construct($input, $output=null) {
+     parent::__construct($input, $output);
+   }
+ 
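+   // Each RPC below follows the same generated pattern: the public method
+   // delegates to a send_*() half that serializes a *_args struct onto the
+   // output protocol, and a recv_*() half that reads the matching *_result
+   // struct back. When the thrift_protocol C extension is loaded and the
+   // protocol is TBinaryProtocolAccelerated, serialization is handed to the
+   // native thrift_protocol_write_binary()/thrift_protocol_read_binary() fast
+   // path instead of the pure-PHP writeMessageBegin()/read() sequence.
+ 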
+   public function getMetaConf($key)
+   {
+     $this->send_getMetaConf($key);
+     return $this->recv_getMetaConf();
+   }
+ 
+   public function send_getMetaConf($key)
+   {
+     $args = new \metastore\ThriftHiveMetastore_getMetaConf_args();
+     $args->key = $key;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'getMetaConf', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('getMetaConf', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_getMetaConf()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_getMetaConf_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_getMetaConf_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->success !== null) {
+       return $result->success;
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     throw new \Exception("getMetaConf failed: unknown result");
+   }
+ 
+   public function setMetaConf($key, $value)
+   {
+     $this->send_setMetaConf($key, $value);
+     $this->recv_setMetaConf();
+   }
+ 
+   public function send_setMetaConf($key, $value)
+   {
+     $args = new \metastore\ThriftHiveMetastore_setMetaConf_args();
+     $args->key = $key;
+     $args->value = $value;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'setMetaConf', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('setMetaConf', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_setMetaConf()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_setMetaConf_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_setMetaConf_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     return;
+   }
+ 
+   public function create_catalog(\metastore\CreateCatalogRequest $catalog)
+   {
+     $this->send_create_catalog($catalog);
+     $this->recv_create_catalog();
+   }
+ 
+   public function send_create_catalog(\metastore\CreateCatalogRequest $catalog)
+   {
+     $args = new \metastore\ThriftHiveMetastore_create_catalog_args();
+     $args->catalog = $catalog;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'create_catalog', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('create_catalog', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_create_catalog()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_catalog_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_create_catalog_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     if ($result->o2 !== null) {
+       throw $result->o2;
+     }
+     if ($result->o3 !== null) {
+       throw $result->o3;
+     }
+     return;
+   }
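+ 
+   // In each recv_*() body, the numbered o1/o2/o3 fields of the *_result
+   // struct mirror the exceptions declared for that method in the Thrift IDL,
+   // in order, and are rethrown when the server populated them; `success`
+   // holds the return value and is absent for void methods such as
+   // create_catalog. A value-returning call that yields neither `success` nor
+   // an exception ends in the generic "unknown result" \Exception, as in
+   // recv_getMetaConf() above.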
+ 
+   public function alter_catalog(\metastore\AlterCatalogRequest $rqst)
+   {
+     $this->send_alter_catalog($rqst);
+     $this->recv_alter_catalog();
+   }
+ 
+   public function send_alter_catalog(\metastore\AlterCatalogRequest $rqst)
+   {
+     $args = new \metastore\ThriftHiveMetastore_alter_catalog_args();
+     $args->rqst = $rqst;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'alter_catalog', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('alter_catalog', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_alter_catalog()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_alter_catalog_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_alter_catalog_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     if ($result->o2 !== null) {
+       throw $result->o2;
+     }
+     if ($result->o3 !== null) {
+       throw $result->o3;
+     }
+     return;
+   }
+ 
+   public function get_catalog(\metastore\GetCatalogRequest $catName)
+   {
+     $this->send_get_catalog($catName);
+     return $this->recv_get_catalog();
+   }
+ 
+   public function send_get_catalog(\metastore\GetCatalogRequest $catName)
+   {
+     $args = new \metastore\ThriftHiveMetastore_get_catalog_args();
+     $args->catName = $catName;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'get_catalog', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('get_catalog', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_get_catalog()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_catalog_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_get_catalog_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->success !== null) {
+       return $result->success;
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     if ($result->o2 !== null) {
+       throw $result->o2;
+     }
+     throw new \Exception("get_catalog failed: unknown result");
+   }
+ 
+   public function get_catalogs()
+   {
+     $this->send_get_catalogs();
+     return $this->recv_get_catalogs();
+   }
+ 
+   public function send_get_catalogs()
+   {
+     $args = new \metastore\ThriftHiveMetastore_get_catalogs_args();
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'get_catalogs', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('get_catalogs', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_get_catalogs()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_catalogs_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_get_catalogs_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->success !== null) {
+       return $result->success;
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     throw new \Exception("get_catalogs failed: unknown result");
+   }
+ 
+   public function drop_catalog(\metastore\DropCatalogRequest $catName)
+   {
+     $this->send_drop_catalog($catName);
+     $this->recv_drop_catalog();
+   }
+ 
+   public function send_drop_catalog(\metastore\DropCatalogRequest $catName)
+   {
+     $args = new \metastore\ThriftHiveMetastore_drop_catalog_args();
+     $args->catName = $catName;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'drop_catalog', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('drop_catalog', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_drop_catalog()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_catalog_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_drop_catalog_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     if ($result->o2 !== null) {
+       throw $result->o2;
+     }
+     if ($result->o3 !== null) {
+       throw $result->o3;
+     }
+     return;
+   }
+ 
+   public function create_database(\metastore\Database $database)
+   {
+     $this->send_create_database($database);
+     $this->recv_create_database();
+   }
+ 
+   public function send_create_database(\metastore\Database $database)
+   {
+     $args = new \metastore\ThriftHiveMetastore_create_database_args();
+     $args->database = $database;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'create_database', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('create_database', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_create_database()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_database_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_create_database_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     if ($result->o2 !== null) {
+       throw $result->o2;
+     }
+     if ($result->o3 !== null) {
+       throw $result->o3;
+     }
+     return;
+   }
+ 
+   public function get_database($name)
+   {
+     $this->send_get_database($name);
+     return $this->recv_get_database();
+   }
+ 
+   public function send_get_database($name)
+   {
+     $args = new \metastore\ThriftHiveMetastore_get_database_args();
+     $args->name = $name;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'get_database', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('get_database', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_get_database()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_database_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_get_database_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->success !== null) {
+       return $result->success;
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     if ($result->o2 !== null) {
+       throw $result->o2;
+     }
+     throw new \Exception("get_database failed: unknown result");
+   }
+ 
+   public function drop_database($name, $deleteData, $cascade)
+   {
+     $this->send_drop_database($name, $deleteData, $cascade);
+     $this->recv_drop_database();
+   }
+ 
+   public function send_drop_database($name, $deleteData, $cascade)
+   {
+     $args = new \metastore\ThriftHiveMetastore_drop_database_args();
+     $args->name = $name;
+     $args->deleteData = $deleteData;
+     $args->cascade = $cascade;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'drop_database', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('drop_database', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_drop_database()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_drop_database_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_drop_database_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     if ($result->o2 !== null) {
+       throw $result->o2;
+     }
+     if ($result->o3 !== null) {
+       throw $result->o3;
+     }
+     return;
+   }
+ 
+   public function get_databases($pattern)
+   {
+     $this->send_get_databases($pattern);
+     return $this->recv_get_databases();
+   }
+ 
+   public function send_get_databases($pattern)
+   {
+     $args = new \metastore\ThriftHiveMetastore_get_databases_args();
+     $args->pattern = $pattern;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'get_databases', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('get_databases', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_get_databases()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_databases_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_get_databases_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->success !== null) {
+       return $result->success;
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     throw new \Exception("get_databases failed: unknown result");
+   }
+ 
+   public function get_all_databases()
+   {
+     $this->send_get_all_databases();
+     return $this->recv_get_all_databases();
+   }
+ 
+   public function send_get_all_databases()
+   {
+     $args = new \metastore\ThriftHiveMetastore_get_all_databases_args();
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'get_all_databases', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('get_all_databases', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_get_all_databases()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_all_databases_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_get_all_databases_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->success !== null) {
+       return $result->success;
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     throw new \Exception("get_all_databases failed: unknown result");
+   }
+ 
+   public function alter_database($dbname, \metastore\Database $db)
+   {
+     $this->send_alter_database($dbname, $db);
+     $this->recv_alter_database();
+   }
+ 
+   public function send_alter_database($dbname, \metastore\Database $db)
+   {
+     $args = new \metastore\ThriftHiveMetastore_alter_database_args();
+     $args->dbname = $dbname;
+     $args->db = $db;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'alter_database', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('alter_database', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_alter_database()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_alter_database_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_alter_database_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     if ($result->o2 !== null) {
+       throw $result->o2;
+     }
+     return;
+   }
+ 
+   public function get_type($name)
+   {
+     $this->send_get_type($name);
+     return $this->recv_get_type();
+   }
+ 
+   public function send_get_type($name)
+   {
+     $args = new \metastore\ThriftHiveMetastore_get_type_args();
+     $args->name = $name;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'get_type', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('get_type', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_get_type()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_type_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_get_type_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->success !== null) {
+       return $result->success;
+     }
+     if ($result->o1 !== null) {
+       throw $result->o1;
+     }
+     if ($result->o2 !== null) {
+       throw $result->o2;
+     }
+     throw new \Exception("get_type failed: unknown result");
+   }
+ 
+   public function create_type(\metastore\Type $type)
+   {
+     $this->send_create_type($type);
+     return $this->recv_create_type();
+   }
+ 
+   public function send_create_type(\metastore\Type $type)
+   {
+     $args = new \metastore\ThriftHiveMetastore_create_type_args();
+     $args->type = $type;
+     $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary');
+     if ($bin_accel)
+     {
+       thrift_protocol_write_binary($this->output_, 'create_type', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite());
+     }
+     else
+     {
+       $this->output_->writeMessageBegin('create_type', TMessageType::CALL, $this->seqid_);
+       $args->write($this->output_);
+       $this->output_->writeMessageEnd();
+       $this->output_->getTransport()->flush();
+     }
+   }
+ 
+   public function recv_create_type()
+   {
+     $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary');
+     if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_create_type_result', $this->input_->isStrictRead());
+     else
+     {
+       $rseqid = 0;
+       $fname = null;
+       $mtype = 0;
+ 
+       $this->input_->readMessageBegin($fname, $mtype, $rseqid);
+       if ($mtype == TMessageType::EXCEPTION) {
+         $x = new TApplicationException();
+         $x->read($this->input_);
+         $this->input_->readMessageEnd();
+         throw $x;
+       }
+       $result = new \metastore\ThriftHiveMetastore_create_type_result();
+       $result->read($this->input_);
+       $this->input_->readMessageEnd();
+     }
+     if ($result->success !== null) {
+       return $result->success;
+     }
+     if ($result->o1 !== 

<TRUNCATED>
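
The PHP client above follows the generated send_*/recv_* pattern throughout: each call serializes an *_args struct, writes a CALL message (via the accelerated binary extension when thrift_protocol_write_binary is available), then reads the matching *_result struct and maps its o1/o2/... fields back onto thrown exceptions. A minimal sketch of driving the same service from the generated Java client, assuming a metastore Thrift server on localhost:9083 (host and port are illustrative, not part of this commit):

  import java.util.List;
  import org.apache.thrift.protocol.TBinaryProtocol;
  import org.apache.thrift.transport.TSocket;
  import org.apache.thrift.transport.TTransport;
  import org.apache.hadoop.hive.metastore.api.Database;
  import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

  public class MetastoreClientSketch {
    public static void main(String[] args) throws Exception {
      TTransport transport = new TSocket("localhost", 9083); // illustrative endpoint
      transport.open();
      ThriftHiveMetastore.Client client =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
      try {
        // Same operations as the PHP methods above; the generated Java client
        // hides the send/recv split behind one blocking call per method.
        List<String> names = client.get_all_databases();
        Database db = client.get_database("default");
        System.out.println(names + " / " + db.getName());
      } finally {
        transport.close();
      }
    }
  }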

[20/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
new file mode 100644
index 0000000..c37ce58
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
@@ -0,0 +1,979 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class HiveObjectRef implements org.apache.thrift.TBase<HiveObjectRef, HiveObjectRef._Fields>, java.io.Serializable, Cloneable, Comparable<HiveObjectRef> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveObjectRef");
+
+  private static final org.apache.thrift.protocol.TField OBJECT_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("objectType", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField OBJECT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("objectName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField PART_VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("partValues", org.apache.thrift.protocol.TType.LIST, (short)4);
+  private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("columnName", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new HiveObjectRefStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new HiveObjectRefTupleSchemeFactory());
+  }
+
+  private HiveObjectType objectType; // required
+  private String dbName; // required
+  private String objectName; // required
+  private List<String> partValues; // required
+  private String columnName; // required
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    /**
+     * 
+     * @see HiveObjectType
+     */
+    OBJECT_TYPE((short)1, "objectType"),
+    DB_NAME((short)2, "dbName"),
+    OBJECT_NAME((short)3, "objectName"),
+    PART_VALUES((short)4, "partValues"),
+    COLUMN_NAME((short)5, "columnName"),
+    CAT_NAME((short)6, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // OBJECT_TYPE
+          return OBJECT_TYPE;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // OBJECT_NAME
+          return OBJECT_NAME;
+        case 4: // PART_VALUES
+          return PART_VALUES;
+        case 5: // COLUMN_NAME
+          return COLUMN_NAME;
+        case 6: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.OBJECT_TYPE, new org.apache.thrift.meta_data.FieldMetaData("objectType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, HiveObjectType.class)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.OBJECT_NAME, new org.apache.thrift.meta_data.FieldMetaData("objectName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PART_VALUES, new org.apache.thrift.meta_data.FieldMetaData("partValues", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("columnName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HiveObjectRef.class, metaDataMap);
+  }
+
+  public HiveObjectRef() {
+  }
+
+  public HiveObjectRef(
+    HiveObjectType objectType,
+    String dbName,
+    String objectName,
+    List<String> partValues,
+    String columnName)
+  {
+    this();
+    this.objectType = objectType;
+    this.dbName = dbName;
+    this.objectName = objectName;
+    this.partValues = partValues;
+    this.columnName = columnName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public HiveObjectRef(HiveObjectRef other) {
+    if (other.isSetObjectType()) {
+      this.objectType = other.objectType;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetObjectName()) {
+      this.objectName = other.objectName;
+    }
+    if (other.isSetPartValues()) {
+      List<String> __this__partValues = new ArrayList<String>(other.partValues);
+      this.partValues = __this__partValues;
+    }
+    if (other.isSetColumnName()) {
+      this.columnName = other.columnName;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public HiveObjectRef deepCopy() {
+    return new HiveObjectRef(this);
+  }
+
+  @Override
+  public void clear() {
+    this.objectType = null;
+    this.dbName = null;
+    this.objectName = null;
+    this.partValues = null;
+    this.columnName = null;
+    this.catName = null;
+  }
+
+  /**
+   * 
+   * @see HiveObjectType
+   */
+  public HiveObjectType getObjectType() {
+    return this.objectType;
+  }
+
+  /**
+   * 
+   * @see HiveObjectType
+   */
+  public void setObjectType(HiveObjectType objectType) {
+    this.objectType = objectType;
+  }
+
+  public void unsetObjectType() {
+    this.objectType = null;
+  }
+
+  /** Returns true if field objectType is set (has been assigned a value) and false otherwise */
+  public boolean isSetObjectType() {
+    return this.objectType != null;
+  }
+
+  public void setObjectTypeIsSet(boolean value) {
+    if (!value) {
+      this.objectType = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getObjectName() {
+    return this.objectName;
+  }
+
+  public void setObjectName(String objectName) {
+    this.objectName = objectName;
+  }
+
+  public void unsetObjectName() {
+    this.objectName = null;
+  }
+
+  /** Returns true if field objectName is set (has been assigned a value) and false otherwise */
+  public boolean isSetObjectName() {
+    return this.objectName != null;
+  }
+
+  public void setObjectNameIsSet(boolean value) {
+    if (!value) {
+      this.objectName = null;
+    }
+  }
+
+  public int getPartValuesSize() {
+    return (this.partValues == null) ? 0 : this.partValues.size();
+  }
+
+  public java.util.Iterator<String> getPartValuesIterator() {
+    return (this.partValues == null) ? null : this.partValues.iterator();
+  }
+
+  public void addToPartValues(String elem) {
+    if (this.partValues == null) {
+      this.partValues = new ArrayList<String>();
+    }
+    this.partValues.add(elem);
+  }
+
+  public List<String> getPartValues() {
+    return this.partValues;
+  }
+
+  public void setPartValues(List<String> partValues) {
+    this.partValues = partValues;
+  }
+
+  public void unsetPartValues() {
+    this.partValues = null;
+  }
+
+  /** Returns true if field partValues is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartValues() {
+    return this.partValues != null;
+  }
+
+  public void setPartValuesIsSet(boolean value) {
+    if (!value) {
+      this.partValues = null;
+    }
+  }
+
+  public String getColumnName() {
+    return this.columnName;
+  }
+
+  public void setColumnName(String columnName) {
+    this.columnName = columnName;
+  }
+
+  public void unsetColumnName() {
+    this.columnName = null;
+  }
+
+  /** Returns true if field columnName is set (has been assigned a value) and false otherwise */
+  public boolean isSetColumnName() {
+    return this.columnName != null;
+  }
+
+  public void setColumnNameIsSet(boolean value) {
+    if (!value) {
+      this.columnName = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case OBJECT_TYPE:
+      if (value == null) {
+        unsetObjectType();
+      } else {
+        setObjectType((HiveObjectType)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case OBJECT_NAME:
+      if (value == null) {
+        unsetObjectName();
+      } else {
+        setObjectName((String)value);
+      }
+      break;
+
+    case PART_VALUES:
+      if (value == null) {
+        unsetPartValues();
+      } else {
+        setPartValues((List<String>)value);
+      }
+      break;
+
+    case COLUMN_NAME:
+      if (value == null) {
+        unsetColumnName();
+      } else {
+        setColumnName((String)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case OBJECT_TYPE:
+      return getObjectType();
+
+    case DB_NAME:
+      return getDbName();
+
+    case OBJECT_NAME:
+      return getObjectName();
+
+    case PART_VALUES:
+      return getPartValues();
+
+    case COLUMN_NAME:
+      return getColumnName();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case OBJECT_TYPE:
+      return isSetObjectType();
+    case DB_NAME:
+      return isSetDbName();
+    case OBJECT_NAME:
+      return isSetObjectName();
+    case PART_VALUES:
+      return isSetPartValues();
+    case COLUMN_NAME:
+      return isSetColumnName();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof HiveObjectRef)
+      return this.equals((HiveObjectRef)that);
+    return false;
+  }
+
+  public boolean equals(HiveObjectRef that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_objectType = true && this.isSetObjectType();
+    boolean that_present_objectType = true && that.isSetObjectType();
+    if (this_present_objectType || that_present_objectType) {
+      if (!(this_present_objectType && that_present_objectType))
+        return false;
+      if (!this.objectType.equals(that.objectType))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_objectName = true && this.isSetObjectName();
+    boolean that_present_objectName = true && that.isSetObjectName();
+    if (this_present_objectName || that_present_objectName) {
+      if (!(this_present_objectName && that_present_objectName))
+        return false;
+      if (!this.objectName.equals(that.objectName))
+        return false;
+    }
+
+    boolean this_present_partValues = true && this.isSetPartValues();
+    boolean that_present_partValues = true && that.isSetPartValues();
+    if (this_present_partValues || that_present_partValues) {
+      if (!(this_present_partValues && that_present_partValues))
+        return false;
+      if (!this.partValues.equals(that.partValues))
+        return false;
+    }
+
+    boolean this_present_columnName = true && this.isSetColumnName();
+    boolean that_present_columnName = true && that.isSetColumnName();
+    if (this_present_columnName || that_present_columnName) {
+      if (!(this_present_columnName && that_present_columnName))
+        return false;
+      if (!this.columnName.equals(that.columnName))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_objectType = true && (isSetObjectType());
+    list.add(present_objectType);
+    if (present_objectType)
+      list.add(objectType.getValue());
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_objectName = true && (isSetObjectName());
+    list.add(present_objectName);
+    if (present_objectName)
+      list.add(objectName);
+
+    boolean present_partValues = true && (isSetPartValues());
+    list.add(present_partValues);
+    if (present_partValues)
+      list.add(partValues);
+
+    boolean present_columnName = true && (isSetColumnName());
+    list.add(present_columnName);
+    if (present_columnName)
+      list.add(columnName);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(HiveObjectRef other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetObjectType()).compareTo(other.isSetObjectType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetObjectType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.objectType, other.objectType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetObjectName()).compareTo(other.isSetObjectName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetObjectName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.objectName, other.objectName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartValues()).compareTo(other.isSetPartValues());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartValues()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partValues, other.partValues);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColumnName()).compareTo(other.isSetColumnName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColumnName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.columnName, other.columnName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("HiveObjectRef(");
+    boolean first = true;
+
+    sb.append("objectType:");
+    if (this.objectType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.objectType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("objectName:");
+    if (this.objectName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.objectName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("partValues:");
+    if (this.partValues == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partValues);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("columnName:");
+    if (this.columnName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.columnName);
+    }
+    first = false;
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class HiveObjectRefStandardSchemeFactory implements SchemeFactory {
+    public HiveObjectRefStandardScheme getScheme() {
+      return new HiveObjectRefStandardScheme();
+    }
+  }
+
+  private static class HiveObjectRefStandardScheme extends StandardScheme<HiveObjectRef> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, HiveObjectRef struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // OBJECT_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.objectType = org.apache.hadoop.hive.metastore.api.HiveObjectType.findByValue(iprot.readI32());
+              struct.setObjectTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // OBJECT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.objectName = iprot.readString();
+              struct.setObjectNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // PART_VALUES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list8 = iprot.readListBegin();
+                struct.partValues = new ArrayList<String>(_list8.size);
+                String _elem9;
+                for (int _i10 = 0; _i10 < _list8.size; ++_i10)
+                {
+                  _elem9 = iprot.readString();
+                  struct.partValues.add(_elem9);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartValuesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // COLUMN_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.columnName = iprot.readString();
+              struct.setColumnNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, HiveObjectRef struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.objectType != null) {
+        oprot.writeFieldBegin(OBJECT_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.objectType.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.objectName != null) {
+        oprot.writeFieldBegin(OBJECT_NAME_FIELD_DESC);
+        oprot.writeString(struct.objectName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.partValues != null) {
+        oprot.writeFieldBegin(PART_VALUES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partValues.size()));
+          for (String _iter11 : struct.partValues)
+          {
+            oprot.writeString(_iter11);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.columnName != null) {
+        oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC);
+        oprot.writeString(struct.columnName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class HiveObjectRefTupleSchemeFactory implements SchemeFactory {
+    public HiveObjectRefTupleScheme getScheme() {
+      return new HiveObjectRefTupleScheme();
+    }
+  }
+
+  private static class HiveObjectRefTupleScheme extends TupleScheme<HiveObjectRef> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetObjectType()) {
+        optionals.set(0);
+      }
+      if (struct.isSetDbName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetObjectName()) {
+        optionals.set(2);
+      }
+      if (struct.isSetPartValues()) {
+        optionals.set(3);
+      }
+      if (struct.isSetColumnName()) {
+        optionals.set(4);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(5);
+      }
+      oprot.writeBitSet(optionals, 6);
+      if (struct.isSetObjectType()) {
+        oprot.writeI32(struct.objectType.getValue());
+      }
+      if (struct.isSetDbName()) {
+        oprot.writeString(struct.dbName);
+      }
+      if (struct.isSetObjectName()) {
+        oprot.writeString(struct.objectName);
+      }
+      if (struct.isSetPartValues()) {
+        {
+          oprot.writeI32(struct.partValues.size());
+          for (String _iter12 : struct.partValues)
+          {
+            oprot.writeString(_iter12);
+          }
+        }
+      }
+      if (struct.isSetColumnName()) {
+        oprot.writeString(struct.columnName);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(6);
+      if (incoming.get(0)) {
+        struct.objectType = org.apache.hadoop.hive.metastore.api.HiveObjectType.findByValue(iprot.readI32());
+        struct.setObjectTypeIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.dbName = iprot.readString();
+        struct.setDbNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.objectName = iprot.readString();
+        struct.setObjectNameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        {
+          org.apache.thrift.protocol.TList _list13 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.partValues = new ArrayList<String>(_list13.size);
+          String _elem14;
+          for (int _i15 = 0; _i15 < _list13.size; ++_i15)
+          {
+            _elem14 = iprot.readString();
+            struct.partValues.add(_elem14);
+          }
+        }
+        struct.setPartValuesIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.columnName = iprot.readString();
+        struct.setColumnNameIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
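
HiveObjectRef is a plain Thrift struct: five default-requirement fields plus the optional catName added for catalog support (field id 6), with equals/hashCode/compareTo derived from whichever fields are set. A small sketch of building one and round-tripping it through the compact protocol, the same protocol its writeObject/readObject serialization hooks use (all names and values below are illustrative):

  import java.util.Arrays;
  import org.apache.thrift.TDeserializer;
  import org.apache.thrift.TSerializer;
  import org.apache.thrift.protocol.TCompactProtocol;
  import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
  import org.apache.hadoop.hive.metastore.api.HiveObjectType;

  public class HiveObjectRefSketch {
    public static void main(String[] args) throws Exception {
      // Reference a column of one partition of a table (values illustrative).
      HiveObjectRef ref = new HiveObjectRef(
          HiveObjectType.COLUMN, "sales_db", "orders",
          Arrays.asList("2018", "07"), "order_id");
      ref.setCatName("hive"); // optional field: written only because it is set

      byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(ref);
      HiveObjectRef copy = new HiveObjectRef();
      new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

      // equals() compares field-by-field, treating mutually unset fields as equal.
      System.out.println(ref.equals(copy) + " " + copy);
    }
  }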

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectType.java
new file mode 100644
index 0000000..f5b1d0e
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectType.java
@@ -0,0 +1,54 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum HiveObjectType implements org.apache.thrift.TEnum {
+  GLOBAL(1),
+  DATABASE(2),
+  TABLE(3),
+  PARTITION(4),
+  COLUMN(5);
+
+  private final int value;
+
+  private HiveObjectType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static HiveObjectType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return GLOBAL;
+      case 2:
+        return DATABASE;
+      case 3:
+        return TABLE;
+      case 4:
+        return PARTITION;
+      case 5:
+        return COLUMN;
+      default:
+        return null;
+    }
+  }
+}
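
HiveObjectType carries its Thrift IDL integer explicitly, so getValue() and findByValue() define the wire mapping rather than Java ordinal order, and unknown integers map to null instead of throwing. A quick sketch:

  import org.apache.hadoop.hive.metastore.api.HiveObjectType;

  public class HiveObjectTypeSketch {
    public static void main(String[] args) {
      System.out.println(HiveObjectType.TABLE.getValue()); // 3, as declared in the IDL
      System.out.println(HiveObjectType.findByValue(4));   // PARTITION
      System.out.println(HiveObjectType.findByValue(99));  // null: unknown wire value
    }
  }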

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java
new file mode 100644
index 0000000..285f402
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ISchema.java
@@ -0,0 +1,1266 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ISchema implements org.apache.thrift.TBase<ISchema, ISchema._Fields>, java.io.Serializable, Cloneable, Comparable<ISchema> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ISchema");
+
+  private static final org.apache.thrift.protocol.TField SCHEMA_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaType", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField COMPATIBILITY_FIELD_DESC = new org.apache.thrift.protocol.TField("compatibility", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField VALIDATION_LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("validationLevel", org.apache.thrift.protocol.TType.I32, (short)6);
+  private static final org.apache.thrift.protocol.TField CAN_EVOLVE_FIELD_DESC = new org.apache.thrift.protocol.TField("canEvolve", org.apache.thrift.protocol.TType.BOOL, (short)7);
+  private static final org.apache.thrift.protocol.TField SCHEMA_GROUP_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaGroup", org.apache.thrift.protocol.TType.STRING, (short)8);
+  private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)9);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ISchemaStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ISchemaTupleSchemeFactory());
+  }
+
+  private SchemaType schemaType; // required
+  private String name; // required
+  private String catName; // required
+  private String dbName; // required
+  private SchemaCompatibility compatibility; // required
+  private SchemaValidation validationLevel; // required
+  private boolean canEvolve; // required
+  private String schemaGroup; // optional
+  private String description; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    /**
+     * 
+     * @see SchemaType
+     */
+    SCHEMA_TYPE((short)1, "schemaType"),
+    NAME((short)2, "name"),
+    CAT_NAME((short)3, "catName"),
+    DB_NAME((short)4, "dbName"),
+    /**
+     * 
+     * @see SchemaCompatibility
+     */
+    COMPATIBILITY((short)5, "compatibility"),
+    /**
+     * 
+     * @see SchemaValidation
+     */
+    VALIDATION_LEVEL((short)6, "validationLevel"),
+    CAN_EVOLVE((short)7, "canEvolve"),
+    SCHEMA_GROUP((short)8, "schemaGroup"),
+    DESCRIPTION((short)9, "description");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // SCHEMA_TYPE
+          return SCHEMA_TYPE;
+        case 2: // NAME
+          return NAME;
+        case 3: // CAT_NAME
+          return CAT_NAME;
+        case 4: // DB_NAME
+          return DB_NAME;
+        case 5: // COMPATIBILITY
+          return COMPATIBILITY;
+        case 6: // VALIDATION_LEVEL
+          return VALIDATION_LEVEL;
+        case 7: // CAN_EVOLVE
+          return CAN_EVOLVE;
+        case 8: // SCHEMA_GROUP
+          return SCHEMA_GROUP;
+        case 9: // DESCRIPTION
+          return DESCRIPTION;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __CANEVOLVE_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.SCHEMA_GROUP,_Fields.DESCRIPTION};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SCHEMA_TYPE, new org.apache.thrift.meta_data.FieldMetaData("schemaType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SchemaType.class)));
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COMPATIBILITY, new org.apache.thrift.meta_data.FieldMetaData("compatibility", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SchemaCompatibility.class)));
+    tmpMap.put(_Fields.VALIDATION_LEVEL, new org.apache.thrift.meta_data.FieldMetaData("validationLevel", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SchemaValidation.class)));
+    tmpMap.put(_Fields.CAN_EVOLVE, new org.apache.thrift.meta_data.FieldMetaData("canEvolve", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.SCHEMA_GROUP, new org.apache.thrift.meta_data.FieldMetaData("schemaGroup", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ISchema.class, metaDataMap);
+  }
+
+  public ISchema() {
+  }
+
+  public ISchema(
+    SchemaType schemaType,
+    String name,
+    String catName,
+    String dbName,
+    SchemaCompatibility compatibility,
+    SchemaValidation validationLevel,
+    boolean canEvolve)
+  {
+    this();
+    this.schemaType = schemaType;
+    this.name = name;
+    this.catName = catName;
+    this.dbName = dbName;
+    this.compatibility = compatibility;
+    this.validationLevel = validationLevel;
+    this.canEvolve = canEvolve;
+    setCanEvolveIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ISchema(ISchema other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetSchemaType()) {
+      this.schemaType = other.schemaType;
+    }
+    if (other.isSetName()) {
+      this.name = other.name;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetCompatibility()) {
+      this.compatibility = other.compatibility;
+    }
+    if (other.isSetValidationLevel()) {
+      this.validationLevel = other.validationLevel;
+    }
+    this.canEvolve = other.canEvolve;
+    if (other.isSetSchemaGroup()) {
+      this.schemaGroup = other.schemaGroup;
+    }
+    if (other.isSetDescription()) {
+      this.description = other.description;
+    }
+  }
+
+  public ISchema deepCopy() {
+    return new ISchema(this);
+  }
+
+  @Override
+  public void clear() {
+    this.schemaType = null;
+    this.name = null;
+    this.catName = null;
+    this.dbName = null;
+    this.compatibility = null;
+    this.validationLevel = null;
+    setCanEvolveIsSet(false);
+    this.canEvolve = false;
+    this.schemaGroup = null;
+    this.description = null;
+  }
+
+  /**
+   * 
+   * @see SchemaType
+   */
+  public SchemaType getSchemaType() {
+    return this.schemaType;
+  }
+
+  /**
+   * 
+   * @see SchemaType
+   */
+  public void setSchemaType(SchemaType schemaType) {
+    this.schemaType = schemaType;
+  }
+
+  public void unsetSchemaType() {
+    this.schemaType = null;
+  }
+
+  /** Returns true if field schemaType is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaType() {
+    return this.schemaType != null;
+  }
+
+  public void setSchemaTypeIsSet(boolean value) {
+    if (!value) {
+      this.schemaType = null;
+    }
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  /**
+   * 
+   * @see SchemaCompatibility
+   */
+  public SchemaCompatibility getCompatibility() {
+    return this.compatibility;
+  }
+
+  /**
+   * 
+   * @see SchemaCompatibility
+   */
+  public void setCompatibility(SchemaCompatibility compatibility) {
+    this.compatibility = compatibility;
+  }
+
+  public void unsetCompatibility() {
+    this.compatibility = null;
+  }
+
+  /** Returns true if field compatibility is set (has been assigned a value) and false otherwise */
+  public boolean isSetCompatibility() {
+    return this.compatibility != null;
+  }
+
+  public void setCompatibilityIsSet(boolean value) {
+    if (!value) {
+      this.compatibility = null;
+    }
+  }
+
+  /**
+   * 
+   * @see SchemaValidation
+   */
+  public SchemaValidation getValidationLevel() {
+    return this.validationLevel;
+  }
+
+  /**
+   * 
+   * @see SchemaValidation
+   */
+  public void setValidationLevel(SchemaValidation validationLevel) {
+    this.validationLevel = validationLevel;
+  }
+
+  public void unsetValidationLevel() {
+    this.validationLevel = null;
+  }
+
+  /** Returns true if field validationLevel is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidationLevel() {
+    return this.validationLevel != null;
+  }
+
+  public void setValidationLevelIsSet(boolean value) {
+    if (!value) {
+      this.validationLevel = null;
+    }
+  }
+
+  public boolean isCanEvolve() {
+    return this.canEvolve;
+  }
+
+  public void setCanEvolve(boolean canEvolve) {
+    this.canEvolve = canEvolve;
+    setCanEvolveIsSet(true);
+  }
+
+  public void unsetCanEvolve() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CANEVOLVE_ISSET_ID);
+  }
+
+  /** Returns true if field canEvolve is set (has been assigned a value) and false otherwise */
+  public boolean isSetCanEvolve() {
+    return EncodingUtils.testBit(__isset_bitfield, __CANEVOLVE_ISSET_ID);
+  }
+
+  public void setCanEvolveIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CANEVOLVE_ISSET_ID, value);
+  }
+
+  public String getSchemaGroup() {
+    return this.schemaGroup;
+  }
+
+  public void setSchemaGroup(String schemaGroup) {
+    this.schemaGroup = schemaGroup;
+  }
+
+  public void unsetSchemaGroup() {
+    this.schemaGroup = null;
+  }
+
+  /** Returns true if field schemaGroup is set (has been assigned a value) and false otherwise */
+  public boolean isSetSchemaGroup() {
+    return this.schemaGroup != null;
+  }
+
+  public void setSchemaGroupIsSet(boolean value) {
+    if (!value) {
+      this.schemaGroup = null;
+    }
+  }
+
+  public String getDescription() {
+    return this.description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public void unsetDescription() {
+    this.description = null;
+  }
+
+  /** Returns true if field description is set (has been assigned a value) and false otherwise */
+  public boolean isSetDescription() {
+    return this.description != null;
+  }
+
+  public void setDescriptionIsSet(boolean value) {
+    if (!value) {
+      this.description = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SCHEMA_TYPE:
+      if (value == null) {
+        unsetSchemaType();
+      } else {
+        setSchemaType((SchemaType)value);
+      }
+      break;
+
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((String)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case COMPATIBILITY:
+      if (value == null) {
+        unsetCompatibility();
+      } else {
+        setCompatibility((SchemaCompatibility)value);
+      }
+      break;
+
+    case VALIDATION_LEVEL:
+      if (value == null) {
+        unsetValidationLevel();
+      } else {
+        setValidationLevel((SchemaValidation)value);
+      }
+      break;
+
+    case CAN_EVOLVE:
+      if (value == null) {
+        unsetCanEvolve();
+      } else {
+        setCanEvolve((Boolean)value);
+      }
+      break;
+
+    case SCHEMA_GROUP:
+      if (value == null) {
+        unsetSchemaGroup();
+      } else {
+        setSchemaGroup((String)value);
+      }
+      break;
+
+    case DESCRIPTION:
+      if (value == null) {
+        unsetDescription();
+      } else {
+        setDescription((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SCHEMA_TYPE:
+      return getSchemaType();
+
+    case NAME:
+      return getName();
+
+    case CAT_NAME:
+      return getCatName();
+
+    case DB_NAME:
+      return getDbName();
+
+    case COMPATIBILITY:
+      return getCompatibility();
+
+    case VALIDATION_LEVEL:
+      return getValidationLevel();
+
+    case CAN_EVOLVE:
+      return isCanEvolve();
+
+    case SCHEMA_GROUP:
+      return getSchemaGroup();
+
+    case DESCRIPTION:
+      return getDescription();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SCHEMA_TYPE:
+      return isSetSchemaType();
+    case NAME:
+      return isSetName();
+    case CAT_NAME:
+      return isSetCatName();
+    case DB_NAME:
+      return isSetDbName();
+    case COMPATIBILITY:
+      return isSetCompatibility();
+    case VALIDATION_LEVEL:
+      return isSetValidationLevel();
+    case CAN_EVOLVE:
+      return isSetCanEvolve();
+    case SCHEMA_GROUP:
+      return isSetSchemaGroup();
+    case DESCRIPTION:
+      return isSetDescription();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ISchema)
+      return this.equals((ISchema)that);
+    return false;
+  }
+
+  public boolean equals(ISchema that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_schemaType = true && this.isSetSchemaType();
+    boolean that_present_schemaType = true && that.isSetSchemaType();
+    if (this_present_schemaType || that_present_schemaType) {
+      if (!(this_present_schemaType && that_present_schemaType))
+        return false;
+      if (!this.schemaType.equals(that.schemaType))
+        return false;
+    }
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_compatibility = true && this.isSetCompatibility();
+    boolean that_present_compatibility = true && that.isSetCompatibility();
+    if (this_present_compatibility || that_present_compatibility) {
+      if (!(this_present_compatibility && that_present_compatibility))
+        return false;
+      if (!this.compatibility.equals(that.compatibility))
+        return false;
+    }
+
+    boolean this_present_validationLevel = true && this.isSetValidationLevel();
+    boolean that_present_validationLevel = true && that.isSetValidationLevel();
+    if (this_present_validationLevel || that_present_validationLevel) {
+      if (!(this_present_validationLevel && that_present_validationLevel))
+        return false;
+      if (!this.validationLevel.equals(that.validationLevel))
+        return false;
+    }
+
+    boolean this_present_canEvolve = true;
+    boolean that_present_canEvolve = true;
+    if (this_present_canEvolve || that_present_canEvolve) {
+      if (!(this_present_canEvolve && that_present_canEvolve))
+        return false;
+      if (this.canEvolve != that.canEvolve)
+        return false;
+    }
+
+    boolean this_present_schemaGroup = true && this.isSetSchemaGroup();
+    boolean that_present_schemaGroup = true && that.isSetSchemaGroup();
+    if (this_present_schemaGroup || that_present_schemaGroup) {
+      if (!(this_present_schemaGroup && that_present_schemaGroup))
+        return false;
+      if (!this.schemaGroup.equals(that.schemaGroup))
+        return false;
+    }
+
+    boolean this_present_description = true && this.isSetDescription();
+    boolean that_present_description = true && that.isSetDescription();
+    if (this_present_description || that_present_description) {
+      if (!(this_present_description && that_present_description))
+        return false;
+      if (!this.description.equals(that.description))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_schemaType = true && (isSetSchemaType());
+    list.add(present_schemaType);
+    if (present_schemaType)
+      list.add(schemaType.getValue());
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_compatibility = true && (isSetCompatibility());
+    list.add(present_compatibility);
+    if (present_compatibility)
+      list.add(compatibility.getValue());
+
+    boolean present_validationLevel = true && (isSetValidationLevel());
+    list.add(present_validationLevel);
+    if (present_validationLevel)
+      list.add(validationLevel.getValue());
+
+    boolean present_canEvolve = true;
+    list.add(present_canEvolve);
+    if (present_canEvolve)
+      list.add(canEvolve);
+
+    boolean present_schemaGroup = true && (isSetSchemaGroup());
+    list.add(present_schemaGroup);
+    if (present_schemaGroup)
+      list.add(schemaGroup);
+
+    boolean present_description = true && (isSetDescription());
+    list.add(present_description);
+    if (present_description)
+      list.add(description);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ISchema other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetSchemaType()).compareTo(other.isSetSchemaType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaType, other.schemaType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCompatibility()).compareTo(other.isSetCompatibility());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCompatibility()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.compatibility, other.compatibility);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidationLevel()).compareTo(other.isSetValidationLevel());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidationLevel()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validationLevel, other.validationLevel);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCanEvolve()).compareTo(other.isSetCanEvolve());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCanEvolve()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.canEvolve, other.canEvolve);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSchemaGroup()).compareTo(other.isSetSchemaGroup());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSchemaGroup()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaGroup, other.schemaGroup);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDescription()).compareTo(other.isSetDescription());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDescription()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.description, other.description);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ISchema(");
+    boolean first = true;
+
+    sb.append("schemaType:");
+    if (this.schemaType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.schemaType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("name:");
+    if (this.name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("catName:");
+    if (this.catName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("compatibility:");
+    if (this.compatibility == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.compatibility);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("validationLevel:");
+    if (this.validationLevel == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.validationLevel);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("canEvolve:");
+    sb.append(this.canEvolve);
+    first = false;
+    if (isSetSchemaGroup()) {
+      if (!first) sb.append(", ");
+      sb.append("schemaGroup:");
+      if (this.schemaGroup == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.schemaGroup);
+      }
+      first = false;
+    }
+    if (isSetDescription()) {
+      if (!first) sb.append(", ");
+      sb.append("description:");
+      if (this.description == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.description);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ISchemaStandardSchemeFactory implements SchemeFactory {
+    public ISchemaStandardScheme getScheme() {
+      return new ISchemaStandardScheme();
+    }
+  }
+
+  private static class ISchemaStandardScheme extends StandardScheme<ISchema> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ISchema struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // SCHEMA_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.schemaType = org.apache.hadoop.hive.metastore.api.SchemaType.findByValue(iprot.readI32());
+              struct.setSchemaTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = iprot.readString();
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // COMPATIBILITY
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.compatibility = org.apache.hadoop.hive.metastore.api.SchemaCompatibility.findByValue(iprot.readI32());
+              struct.setCompatibilityIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // VALIDATION_LEVEL
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.validationLevel = org.apache.hadoop.hive.metastore.api.SchemaValidation.findByValue(iprot.readI32());
+              struct.setValidationLevelIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // CAN_EVOLVE
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.canEvolve = iprot.readBool();
+              struct.setCanEvolveIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // SCHEMA_GROUP
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.schemaGroup = iprot.readString();
+              struct.setSchemaGroupIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // DESCRIPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.description = iprot.readString();
+              struct.setDescriptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ISchema struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.schemaType != null) {
+        oprot.writeFieldBegin(SCHEMA_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.schemaType.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        oprot.writeString(struct.name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+        oprot.writeString(struct.catName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.compatibility != null) {
+        oprot.writeFieldBegin(COMPATIBILITY_FIELD_DESC);
+        oprot.writeI32(struct.compatibility.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.validationLevel != null) {
+        oprot.writeFieldBegin(VALIDATION_LEVEL_FIELD_DESC);
+        oprot.writeI32(struct.validationLevel.getValue());
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(CAN_EVOLVE_FIELD_DESC);
+      oprot.writeBool(struct.canEvolve);
+      oprot.writeFieldEnd();
+      if (struct.schemaGroup != null) {
+        if (struct.isSetSchemaGroup()) {
+          oprot.writeFieldBegin(SCHEMA_GROUP_FIELD_DESC);
+          oprot.writeString(struct.schemaGroup);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.description != null) {
+        if (struct.isSetDescription()) {
+          oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC);
+          oprot.writeString(struct.description);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ISchemaTupleSchemeFactory implements SchemeFactory {
+    public ISchemaTupleScheme getScheme() {
+      return new ISchemaTupleScheme();
+    }
+  }
+
+  private static class ISchemaTupleScheme extends TupleScheme<ISchema> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ISchema struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetSchemaType()) {
+        optionals.set(0);
+      }
+      if (struct.isSetName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(2);
+      }
+      if (struct.isSetDbName()) {
+        optionals.set(3);
+      }
+      if (struct.isSetCompatibility()) {
+        optionals.set(4);
+      }
+      if (struct.isSetValidationLevel()) {
+        optionals.set(5);
+      }
+      if (struct.isSetCanEvolve()) {
+        optionals.set(6);
+      }
+      if (struct.isSetSchemaGroup()) {
+        optionals.set(7);
+      }
+      if (struct.isSetDescription()) {
+        optionals.set(8);
+      }
+      oprot.writeBitSet(optionals, 9);
+      if (struct.isSetSchemaType()) {
+        oprot.writeI32(struct.schemaType.getValue());
+      }
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+      if (struct.isSetDbName()) {
+        oprot.writeString(struct.dbName);
+      }
+      if (struct.isSetCompatibility()) {
+        oprot.writeI32(struct.compatibility.getValue());
+      }
+      if (struct.isSetValidationLevel()) {
+        oprot.writeI32(struct.validationLevel.getValue());
+      }
+      if (struct.isSetCanEvolve()) {
+        oprot.writeBool(struct.canEvolve);
+      }
+      if (struct.isSetSchemaGroup()) {
+        oprot.writeString(struct.schemaGroup);
+      }
+      if (struct.isSetDescription()) {
+        oprot.writeString(struct.description);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ISchema struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(9);
+      if (incoming.get(0)) {
+        struct.schemaType = org.apache.hadoop.hive.metastore.api.SchemaType.findByValue(iprot.readI32());
+        struct.setSchemaTypeIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.name = iprot.readString();
+        struct.setNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.dbName = iprot.readString();
+        struct.setDbNameIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.compatibility = org.apache.hadoop.hive.metastore.api.SchemaCompatibility.findByValue(iprot.readI32());
+        struct.setCompatibilityIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.validationLevel = org.apache.hadoop.hive.metastore.api.SchemaValidation.findByValue(iprot.readI32());
+        struct.setValidationLevelIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.canEvolve = iprot.readBool();
+        struct.setCanEvolveIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.schemaGroup = iprot.readString();
+        struct.setSchemaGroupIsSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.description = iprot.readString();
+        struct.setDescriptionIsSet(true);
+      }
+    }
+  }
+
+}
+
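For readers skimming the generated bean above: a minimal usage sketch (assumptions, not part of the commit) of how ISchema is typically built and round-tripped through the compact protocol, which is the same machinery the writeObject/readObject hooks rely on. The SchemaType.AVRO, SchemaCompatibility.BACKWARD, and SchemaValidation.LATEST enum constants are assumed from hive_metastore.thrift; substitute whatever constants your build generates.

import org.apache.hadoop.hive.metastore.api.ISchema;
import org.apache.hadoop.hive.metastore.api.SchemaType;           // assumed enum constants
import org.apache.hadoop.hive.metastore.api.SchemaCompatibility;  // assumed enum constants
import org.apache.hadoop.hive.metastore.api.SchemaValidation;     // assumed enum constants
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class ISchemaRoundTrip {
  public static void main(String[] args) throws Exception {
    ISchema schema = new ISchema();
    schema.setSchemaType(SchemaType.AVRO);                   // assumed constant
    schema.setName("orders_value");
    schema.setCatName("hive");
    schema.setDbName("default");
    schema.setCompatibility(SchemaCompatibility.BACKWARD);   // assumed constant
    schema.setValidationLevel(SchemaValidation.LATEST);      // assumed constant
    schema.setCanEvolve(true);
    schema.setDescription("example schema");                 // optional field

    // Serialize with the compact protocol, as the Java-serialization hooks do.
    byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(schema);

    // Deserialize into a fresh bean and verify the round trip.
    ISchema copy = new ISchema();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
    System.out.println(copy.equals(schema));  // true: field-by-field equals()
    System.out.println(copy);                 // generated toString() output
  }
}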


[15/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
new file mode 100644
index 0000000..c8e11c2
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchLockException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NoSuchLockException extends TException implements org.apache.thrift.TBase<NoSuchLockException, NoSuchLockException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchLockException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchLockException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NoSuchLockExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NoSuchLockExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NoSuchLockException.class, metaDataMap);
+  }
+
+  public NoSuchLockException() {
+  }
+
+  public NoSuchLockException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NoSuchLockException(NoSuchLockException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public NoSuchLockException deepCopy() {
+    return new NoSuchLockException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NoSuchLockException)
+      return this.equals((NoSuchLockException)that);
+    return false;
+  }
+
+  public boolean equals(NoSuchLockException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NoSuchLockException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NoSuchLockException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NoSuchLockExceptionStandardSchemeFactory implements SchemeFactory {
+    public NoSuchLockExceptionStandardScheme getScheme() {
+      return new NoSuchLockExceptionStandardScheme();
+    }
+  }
+
+  private static class NoSuchLockExceptionStandardScheme extends StandardScheme<NoSuchLockException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NoSuchLockException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NoSuchLockException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NoSuchLockExceptionTupleSchemeFactory implements SchemeFactory {
+    public NoSuchLockExceptionTupleScheme getScheme() {
+      return new NoSuchLockExceptionTupleScheme();
+    }
+  }
+
+  private static class NoSuchLockExceptionTupleScheme extends TupleScheme<NoSuchLockException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NoSuchLockException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NoSuchLockException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
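As a quick illustration of the exception bean above: because NoSuchLockException extends TException and defines getMessage() over its Thrift "message" field (field 1), it throws and catches like any Java exception while remaining serializable as a struct. A self-contained sketch; checkLock is a hypothetical stand-in for a metastore call that declares this exception:

import org.apache.hadoop.hive.metastore.api.NoSuchLockException;

public class LockLookupDemo {
  // Hypothetical helper standing in for a metastore RPC that can
  // raise the Thrift-declared exception.
  static void checkLock(long lockId) throws NoSuchLockException {
    if (lockId <= 0) {
      throw new NoSuchLockException("No such lock: " + lockId);
    }
  }

  public static void main(String[] args) {
    try {
      checkLock(-1L);
    } catch (NoSuchLockException e) {
      // getMessage() returns the Thrift 'message' field set in the constructor.
      System.out.println("caught: " + e.getMessage());
    }
  }
}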

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
new file mode 100644
index 0000000..122b340
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchObjectException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NoSuchObjectException extends TException implements org.apache.thrift.TBase<NoSuchObjectException, NoSuchObjectException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchObjectException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchObjectException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NoSuchObjectExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NoSuchObjectExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NoSuchObjectException.class, metaDataMap);
+  }
+
+  public NoSuchObjectException() {
+  }
+
+  public NoSuchObjectException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NoSuchObjectException(NoSuchObjectException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public NoSuchObjectException deepCopy() {
+    return new NoSuchObjectException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NoSuchObjectException)
+      return this.equals((NoSuchObjectException)that);
+    return false;
+  }
+
+  public boolean equals(NoSuchObjectException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NoSuchObjectException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NoSuchObjectException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NoSuchObjectExceptionStandardSchemeFactory implements SchemeFactory {
+    public NoSuchObjectExceptionStandardScheme getScheme() {
+      return new NoSuchObjectExceptionStandardScheme();
+    }
+  }
+
+  private static class NoSuchObjectExceptionStandardScheme extends StandardScheme<NoSuchObjectException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NoSuchObjectException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NoSuchObjectException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NoSuchObjectExceptionTupleSchemeFactory implements SchemeFactory {
+    public NoSuchObjectExceptionTupleScheme getScheme() {
+      return new NoSuchObjectExceptionTupleScheme();
+    }
+  }
+
+  private static class NoSuchObjectExceptionTupleScheme extends TupleScheme<NoSuchObjectException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NoSuchObjectException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NoSuchObjectException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
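
For readers skimming the generated bean above, a minimal sketch of its value semantics, using only methods that appear in this diff (the table name is illustrative):

    import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;

    public class NoSuchObjectExceptionDemo {
      public static void main(String[] args) {
        // The one-argument constructor sets the single 'message' field.
        NoSuchObjectException original =
            new NoSuchObjectException("table default.t1 not found");

        // deepCopy() delegates to the copy constructor defined above.
        NoSuchObjectException copy = original.deepCopy();

        // equals() and hashCode() are value-based on 'message'.
        assert original.equals(copy) && original.hashCode() == copy.hashCode();

        // unsetMessage() nulls the field; isSetMessage() tracks presence.
        copy.unsetMessage();
        assert !copy.isSetMessage();

        System.out.println(original); // NoSuchObjectException(message:...)
      }
    }

The same constructor/deepCopy/isSet pattern repeats for every struct in this commit, so the sketch generalizes.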

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
new file mode 100644
index 0000000..df62229
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NoSuchTxnException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NoSuchTxnException extends TException implements org.apache.thrift.TBase<NoSuchTxnException, NoSuchTxnException._Fields>, java.io.Serializable, Cloneable, Comparable<NoSuchTxnException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NoSuchTxnException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NoSuchTxnExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NoSuchTxnExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NoSuchTxnException.class, metaDataMap);
+  }
+
+  public NoSuchTxnException() {
+  }
+
+  public NoSuchTxnException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NoSuchTxnException(NoSuchTxnException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public NoSuchTxnException deepCopy() {
+    return new NoSuchTxnException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NoSuchTxnException)
+      return this.equals((NoSuchTxnException)that);
+    return false;
+  }
+
+  public boolean equals(NoSuchTxnException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NoSuchTxnException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NoSuchTxnException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NoSuchTxnExceptionStandardSchemeFactory implements SchemeFactory {
+    public NoSuchTxnExceptionStandardScheme getScheme() {
+      return new NoSuchTxnExceptionStandardScheme();
+    }
+  }
+
+  private static class NoSuchTxnExceptionStandardScheme extends StandardScheme<NoSuchTxnException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NoSuchTxnException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NoSuchTxnException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NoSuchTxnExceptionTupleSchemeFactory implements SchemeFactory {
+    public NoSuchTxnExceptionTupleScheme getScheme() {
+      return new NoSuchTxnExceptionTupleScheme();
+    }
+  }
+
+  private static class NoSuchTxnExceptionTupleScheme extends TupleScheme<NoSuchTxnException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NoSuchTxnException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NoSuchTxnException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
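
The private writeObject/readObject hooks above route plain Java serialization through Thrift's TCompactProtocol rather than default field-by-field serialization. A round-trip sketch, assuming only the generated class and the JDK (the transaction id in the message is illustrative):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.ObjectInputStream;
    import java.io.ObjectOutputStream;
    import org.apache.hadoop.hive.metastore.api.NoSuchTxnException;

    public class ThriftJavaSerializationDemo {
      public static void main(String[] args) throws Exception {
        NoSuchTxnException before = new NoSuchTxnException("txn 42 not found");

        // writeObject() above encodes the struct with TCompactProtocol.
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (ObjectOutputStream out = new ObjectOutputStream(buf)) {
          out.writeObject(before);
        }

        // readObject() replays the compact encoding to rebuild the bean.
        try (ObjectInputStream in = new ObjectInputStream(
                 new ByteArrayInputStream(buf.toByteArray()))) {
          NoSuchTxnException after = (NoSuchTxnException) in.readObject();
          assert before.equals(after); // value equality on 'message'
        }
      }
    }

For RPC, read() and write() dispatch the same way: the static 'schemes' map selects the StandardScheme for field-tagged protocols, or the TupleScheme when the transport uses TTupleProtocol.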

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java
new file mode 100644
index 0000000..803dc20
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsRequest.java
@@ -0,0 +1,591 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NotNullConstraintsRequest implements org.apache.thrift.TBase<NotNullConstraintsRequest, NotNullConstraintsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<NotNullConstraintsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotNullConstraintsRequest");
+
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NotNullConstraintsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NotNullConstraintsRequestTupleSchemeFactory());
+  }
+
+  private String catName; // required
+  private String db_name; // required
+  private String tbl_name; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CAT_NAME((short)1, "catName"),
+    DB_NAME((short)2, "db_name"),
+    TBL_NAME((short)3, "tbl_name");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CAT_NAME
+          return CAT_NAME;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // TBL_NAME
+          return TBL_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotNullConstraintsRequest.class, metaDataMap);
+  }
+
+  public NotNullConstraintsRequest() {
+  }
+
+  public NotNullConstraintsRequest(
+    String catName,
+    String db_name,
+    String tbl_name)
+  {
+    this();
+    this.catName = catName;
+    this.db_name = db_name;
+    this.tbl_name = tbl_name;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NotNullConstraintsRequest(NotNullConstraintsRequest other) {
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetDb_name()) {
+      this.db_name = other.db_name;
+    }
+    if (other.isSetTbl_name()) {
+      this.tbl_name = other.tbl_name;
+    }
+  }
+
+  public NotNullConstraintsRequest deepCopy() {
+    return new NotNullConstraintsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catName = null;
+    this.db_name = null;
+    this.tbl_name = null;
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getDb_name() {
+    return this.db_name;
+  }
+
+  public void setDb_name(String db_name) {
+    this.db_name = db_name;
+  }
+
+  public void unsetDb_name() {
+    this.db_name = null;
+  }
+
+  /** Returns true if field db_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetDb_name() {
+    return this.db_name != null;
+  }
+
+  public void setDb_nameIsSet(boolean value) {
+    if (!value) {
+      this.db_name = null;
+    }
+  }
+
+  public String getTbl_name() {
+    return this.tbl_name;
+  }
+
+  public void setTbl_name(String tbl_name) {
+    this.tbl_name = tbl_name;
+  }
+
+  public void unsetTbl_name() {
+    this.tbl_name = null;
+  }
+
+  /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetTbl_name() {
+    return this.tbl_name != null;
+  }
+
+  public void setTbl_nameIsSet(boolean value) {
+    if (!value) {
+      this.tbl_name = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDb_name();
+      } else {
+        setDb_name((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTbl_name();
+      } else {
+        setTbl_name((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CAT_NAME:
+      return getCatName();
+
+    case DB_NAME:
+      return getDb_name();
+
+    case TBL_NAME:
+      return getTbl_name();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CAT_NAME:
+      return isSetCatName();
+    case DB_NAME:
+      return isSetDb_name();
+    case TBL_NAME:
+      return isSetTbl_name();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NotNullConstraintsRequest)
+      return this.equals((NotNullConstraintsRequest)that);
+    return false;
+  }
+
+  public boolean equals(NotNullConstraintsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_db_name = true && this.isSetDb_name();
+    boolean that_present_db_name = true && that.isSetDb_name();
+    if (this_present_db_name || that_present_db_name) {
+      if (!(this_present_db_name && that_present_db_name))
+        return false;
+      if (!this.db_name.equals(that.db_name))
+        return false;
+    }
+
+    boolean this_present_tbl_name = true && this.isSetTbl_name();
+    boolean that_present_tbl_name = true && that.isSetTbl_name();
+    if (this_present_tbl_name || that_present_tbl_name) {
+      if (!(this_present_tbl_name && that_present_tbl_name))
+        return false;
+      if (!this.tbl_name.equals(that.tbl_name))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_db_name = true && (isSetDb_name());
+    list.add(present_db_name);
+    if (present_db_name)
+      list.add(db_name);
+
+    boolean present_tbl_name = true && (isSetTbl_name());
+    list.add(present_tbl_name);
+    if (present_tbl_name)
+      list.add(tbl_name);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NotNullConstraintsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDb_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTbl_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NotNullConstraintsRequest(");
+    boolean first = true;
+
+    sb.append("catName:");
+    if (this.catName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("db_name:");
+    if (this.db_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.db_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tbl_name:");
+    if (this.tbl_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tbl_name);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetCatName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDb_name()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTbl_name()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tbl_name' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NotNullConstraintsRequestStandardSchemeFactory implements SchemeFactory {
+    public NotNullConstraintsRequestStandardScheme getScheme() {
+      return new NotNullConstraintsRequestStandardScheme();
+    }
+  }
+
+  private static class NotNullConstraintsRequestStandardScheme extends StandardScheme<NotNullConstraintsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NotNullConstraintsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.db_name = iprot.readString();
+              struct.setDb_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tbl_name = iprot.readString();
+              struct.setTbl_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NotNullConstraintsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catName != null) {
+        oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+        oprot.writeString(struct.catName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.db_name != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.db_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tbl_name != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tbl_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NotNullConstraintsRequestTupleSchemeFactory implements SchemeFactory {
+    public NotNullConstraintsRequestTupleScheme getScheme() {
+      return new NotNullConstraintsRequestTupleScheme();
+    }
+  }
+
+  private static class NotNullConstraintsRequestTupleScheme extends TupleScheme<NotNullConstraintsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.catName);
+      oprot.writeString(struct.db_name);
+      oprot.writeString(struct.tbl_name);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.catName = iprot.readString();
+      struct.setCatNameIsSet(true);
+      struct.db_name = iprot.readString();
+      struct.setDb_nameIsSet(true);
+      struct.tbl_name = iprot.readString();
+      struct.setTbl_nameIsSet(true);
+    }
+  }
+
+}
+
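
All three fields of this request are REQUIRED in metaDataMap, and the standard scheme calls validate() before writing. A sketch of both outcomes (catalog, database, and table names are illustrative):

    import org.apache.hadoop.hive.metastore.api.NotNullConstraintsRequest;
    import org.apache.thrift.TException;

    public class RequestValidationDemo {
      public static void main(String[] args) throws TException {
        // All required fields set: validate() passes.
        NotNullConstraintsRequest ok =
            new NotNullConstraintsRequest("hive", "default", "orders");
        ok.validate();

        // tbl_name left unset: validate() rejects the struct, which is
        // also the first thing the standard scheme's write() would do.
        NotNullConstraintsRequest bad = new NotNullConstraintsRequest();
        bad.setCatName("hive");
        bad.setDb_name("default");
        try {
          bad.validate();
        } catch (TException expected) {
          // Required field 'tbl_name' is unset! Struct:...
          System.out.println(expected.getMessage());
        }
      }
    }

Note that the tuple scheme's write() above skips validate() and writes all three strings unconditionally, so an unset required field would likely surface there as a NullPointerException rather than the TProtocolException shown here.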

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java
new file mode 100644
index 0000000..002ca13
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotNullConstraintsResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NotNullConstraintsResponse implements org.apache.thrift.TBase<NotNullConstraintsResponse, NotNullConstraintsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<NotNullConstraintsResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotNullConstraintsResponse");
+
+  private static final org.apache.thrift.protocol.TField NOT_NULL_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("notNullConstraints", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NotNullConstraintsResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NotNullConstraintsResponseTupleSchemeFactory());
+  }
+
+  private List<SQLNotNullConstraint> notNullConstraints; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NOT_NULL_CONSTRAINTS((short)1, "notNullConstraints");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NOT_NULL_CONSTRAINTS
+          return NOT_NULL_CONSTRAINTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NOT_NULL_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("notNullConstraints", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLNotNullConstraint.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotNullConstraintsResponse.class, metaDataMap);
+  }
+
+  public NotNullConstraintsResponse() {
+  }
+
+  public NotNullConstraintsResponse(
+    List<SQLNotNullConstraint> notNullConstraints)
+  {
+    this();
+    this.notNullConstraints = notNullConstraints;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NotNullConstraintsResponse(NotNullConstraintsResponse other) {
+    if (other.isSetNotNullConstraints()) {
+      List<SQLNotNullConstraint> __this__notNullConstraints = new ArrayList<SQLNotNullConstraint>(other.notNullConstraints.size());
+      for (SQLNotNullConstraint other_element : other.notNullConstraints) {
+        __this__notNullConstraints.add(new SQLNotNullConstraint(other_element));
+      }
+      this.notNullConstraints = __this__notNullConstraints;
+    }
+  }
+
+  public NotNullConstraintsResponse deepCopy() {
+    return new NotNullConstraintsResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.notNullConstraints = null;
+  }
+
+  public int getNotNullConstraintsSize() {
+    return (this.notNullConstraints == null) ? 0 : this.notNullConstraints.size();
+  }
+
+  public java.util.Iterator<SQLNotNullConstraint> getNotNullConstraintsIterator() {
+    return (this.notNullConstraints == null) ? null : this.notNullConstraints.iterator();
+  }
+
+  public void addToNotNullConstraints(SQLNotNullConstraint elem) {
+    if (this.notNullConstraints == null) {
+      this.notNullConstraints = new ArrayList<SQLNotNullConstraint>();
+    }
+    this.notNullConstraints.add(elem);
+  }
+
+  public List<SQLNotNullConstraint> getNotNullConstraints() {
+    return this.notNullConstraints;
+  }
+
+  public void setNotNullConstraints(List<SQLNotNullConstraint> notNullConstraints) {
+    this.notNullConstraints = notNullConstraints;
+  }
+
+  public void unsetNotNullConstraints() {
+    this.notNullConstraints = null;
+  }
+
+  /** Returns true if field notNullConstraints is set (has been assigned a value) and false otherwise */
+  public boolean isSetNotNullConstraints() {
+    return this.notNullConstraints != null;
+  }
+
+  public void setNotNullConstraintsIsSet(boolean value) {
+    if (!value) {
+      this.notNullConstraints = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NOT_NULL_CONSTRAINTS:
+      if (value == null) {
+        unsetNotNullConstraints();
+      } else {
+        setNotNullConstraints((List<SQLNotNullConstraint>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NOT_NULL_CONSTRAINTS:
+      return getNotNullConstraints();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NOT_NULL_CONSTRAINTS:
+      return isSetNotNullConstraints();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NotNullConstraintsResponse)
+      return this.equals((NotNullConstraintsResponse)that);
+    return false;
+  }
+
+  public boolean equals(NotNullConstraintsResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_notNullConstraints = true && this.isSetNotNullConstraints();
+    boolean that_present_notNullConstraints = true && that.isSetNotNullConstraints();
+    if (this_present_notNullConstraints || that_present_notNullConstraints) {
+      if (!(this_present_notNullConstraints && that_present_notNullConstraints))
+        return false;
+      if (!this.notNullConstraints.equals(that.notNullConstraints))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_notNullConstraints = true && (isSetNotNullConstraints());
+    list.add(present_notNullConstraints);
+    if (present_notNullConstraints)
+      list.add(notNullConstraints);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NotNullConstraintsResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetNotNullConstraints()).compareTo(other.isSetNotNullConstraints());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNotNullConstraints()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.notNullConstraints, other.notNullConstraints);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NotNullConstraintsResponse(");
+    boolean first = true;
+
+    sb.append("notNullConstraints:");
+    if (this.notNullConstraints == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.notNullConstraints);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetNotNullConstraints()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'notNullConstraints' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NotNullConstraintsResponseStandardSchemeFactory implements SchemeFactory {
+    public NotNullConstraintsResponseStandardScheme getScheme() {
+      return new NotNullConstraintsResponseStandardScheme();
+    }
+  }
+
+  private static class NotNullConstraintsResponseStandardScheme extends StandardScheme<NotNullConstraintsResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NotNullConstraintsResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NOT_NULL_CONSTRAINTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list344 = iprot.readListBegin();
+                struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list344.size);
+                SQLNotNullConstraint _elem345;
+                for (int _i346 = 0; _i346 < _list344.size; ++_i346)
+                {
+                  _elem345 = new SQLNotNullConstraint();
+                  _elem345.read(iprot);
+                  struct.notNullConstraints.add(_elem345);
+                }
+                iprot.readListEnd();
+              }
+              struct.setNotNullConstraintsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NotNullConstraintsResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.notNullConstraints != null) {
+        oprot.writeFieldBegin(NOT_NULL_CONSTRAINTS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraints.size()));
+          for (SQLNotNullConstraint _iter347 : struct.notNullConstraints)
+          {
+            _iter347.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NotNullConstraintsResponseTupleSchemeFactory implements SchemeFactory {
+    public NotNullConstraintsResponseTupleScheme getScheme() {
+      return new NotNullConstraintsResponseTupleScheme();
+    }
+  }
+
+  private static class NotNullConstraintsResponseTupleScheme extends TupleScheme<NotNullConstraintsResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.notNullConstraints.size());
+        for (SQLNotNullConstraint _iter348 : struct.notNullConstraints)
+        {
+          _iter348.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NotNullConstraintsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list349 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.notNullConstraints = new ArrayList<SQLNotNullConstraint>(_list349.size);
+        SQLNotNullConstraint _elem350;
+        for (int _i351 = 0; _i351 < _list349.size; ++_i351)
+        {
+          _elem350 = new SQLNotNullConstraint();
+          _elem350.read(iprot);
+          struct.notNullConstraints.add(_elem350);
+        }
+      }
+      struct.setNotNullConstraintsIsSet(true);
+    }
+  }
+
+}
+
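
On the consuming side, the response carries a required list of SQLNotNullConstraint. A sketch of building and draining one; the SQLNotNullConstraint setter and getter names are assumed from its field descriptors (table_db, table_name, column_name), and the values are illustrative:

    import org.apache.hadoop.hive.metastore.api.NotNullConstraintsResponse;
    import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;

    public class ResponseDemo {
      public static void main(String[] args) throws Exception {
        NotNullConstraintsResponse response = new NotNullConstraintsResponse();

        // addToNotNullConstraints() lazily allocates the backing ArrayList.
        SQLNotNullConstraint nn = new SQLNotNullConstraint();
        nn.setTable_db("default");
        nn.setTable_name("orders");
        nn.setColumn_name("id");
        response.addToNotNullConstraints(nn);

        // The list is REQUIRED, so validate() passes only once it is set.
        response.validate();

        System.out.println(response.getNotNullConstraintsSize() + " constraint(s)");
        for (SQLNotNullConstraint c : response.getNotNullConstraints()) {
          System.out.println(c.getTable_db() + "." + c.getTable_name()
              + "." + c.getColumn_name());
        }
      }
    }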


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java
new file mode 100644
index 0000000..30dca26
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/BooleanColumnStatsData.java
@@ -0,0 +1,696 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class BooleanColumnStatsData implements org.apache.thrift.TBase<BooleanColumnStatsData, BooleanColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<BooleanColumnStatsData> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("BooleanColumnStatsData");
+
+  private static final org.apache.thrift.protocol.TField NUM_TRUES_FIELD_DESC = new org.apache.thrift.protocol.TField("numTrues", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField NUM_FALSES_FIELD_DESC = new org.apache.thrift.protocol.TField("numFalses", org.apache.thrift.protocol.TType.I64, (short)2);
+  private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField BIT_VECTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("bitVectors", org.apache.thrift.protocol.TType.STRING, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new BooleanColumnStatsDataStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new BooleanColumnStatsDataTupleSchemeFactory());
+  }
+
+  private long numTrues; // required
+  private long numFalses; // required
+  private long numNulls; // required
+  private ByteBuffer bitVectors; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NUM_TRUES((short)1, "numTrues"),
+    NUM_FALSES((short)2, "numFalses"),
+    NUM_NULLS((short)3, "numNulls"),
+    BIT_VECTORS((short)4, "bitVectors");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NUM_TRUES
+          return NUM_TRUES;
+        case 2: // NUM_FALSES
+          return NUM_FALSES;
+        case 3: // NUM_NULLS
+          return NUM_NULLS;
+        case 4: // BIT_VECTORS
+          return BIT_VECTORS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __NUMTRUES_ISSET_ID = 0;
+  private static final int __NUMFALSES_ISSET_ID = 1;
+  private static final int __NUMNULLS_ISSET_ID = 2;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.BIT_VECTORS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NUM_TRUES, new org.apache.thrift.meta_data.FieldMetaData("numTrues", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.NUM_FALSES, new org.apache.thrift.meta_data.FieldMetaData("numFalses", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.BIT_VECTORS, new org.apache.thrift.meta_data.FieldMetaData("bitVectors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(BooleanColumnStatsData.class, metaDataMap);
+  }
+
+  public BooleanColumnStatsData() {
+  }
+
+  public BooleanColumnStatsData(
+    long numTrues,
+    long numFalses,
+    long numNulls)
+  {
+    this();
+    this.numTrues = numTrues;
+    setNumTruesIsSet(true);
+    this.numFalses = numFalses;
+    setNumFalsesIsSet(true);
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public BooleanColumnStatsData(BooleanColumnStatsData other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.numTrues = other.numTrues;
+    this.numFalses = other.numFalses;
+    this.numNulls = other.numNulls;
+    if (other.isSetBitVectors()) {
+      this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(other.bitVectors);
+    }
+  }
+
+  public BooleanColumnStatsData deepCopy() {
+    return new BooleanColumnStatsData(this);
+  }
+
+  @Override
+  public void clear() {
+    setNumTruesIsSet(false);
+    this.numTrues = 0;
+    setNumFalsesIsSet(false);
+    this.numFalses = 0;
+    setNumNullsIsSet(false);
+    this.numNulls = 0;
+    this.bitVectors = null;
+  }
+
+  public long getNumTrues() {
+    return this.numTrues;
+  }
+
+  public void setNumTrues(long numTrues) {
+    this.numTrues = numTrues;
+    setNumTruesIsSet(true);
+  }
+
+  public void unsetNumTrues() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMTRUES_ISSET_ID);
+  }
+
+  /** Returns true if field numTrues is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumTrues() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMTRUES_ISSET_ID);
+  }
+
+  public void setNumTruesIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMTRUES_ISSET_ID, value);
+  }
+
+  public long getNumFalses() {
+    return this.numFalses;
+  }
+
+  public void setNumFalses(long numFalses) {
+    this.numFalses = numFalses;
+    setNumFalsesIsSet(true);
+  }
+
+  public void unsetNumFalses() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMFALSES_ISSET_ID);
+  }
+
+  /** Returns true if field numFalses is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumFalses() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMFALSES_ISSET_ID);
+  }
+
+  public void setNumFalsesIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMFALSES_ISSET_ID, value);
+  }
+
+  public long getNumNulls() {
+    return this.numNulls;
+  }
+
+  public void setNumNulls(long numNulls) {
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+  }
+
+  public void unsetNumNulls() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumNulls() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  public void setNumNullsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value);
+  }
+
+  public byte[] getBitVectors() {
+    setBitVectors(org.apache.thrift.TBaseHelper.rightSize(bitVectors));
+    return bitVectors == null ? null : bitVectors.array();
+  }
+
+  public ByteBuffer bufferForBitVectors() {
+    return org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void setBitVectors(byte[] bitVectors) {
+    this.bitVectors = bitVectors == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(bitVectors, bitVectors.length));
+  }
+
+  public void setBitVectors(ByteBuffer bitVectors) {
+    this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void unsetBitVectors() {
+    this.bitVectors = null;
+  }
+
+  /** Returns true if field bitVectors is set (has been assigned a value) and false otherwise */
+  public boolean isSetBitVectors() {
+    return this.bitVectors != null;
+  }
+
+  public void setBitVectorsIsSet(boolean value) {
+    if (!value) {
+      this.bitVectors = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NUM_TRUES:
+      if (value == null) {
+        unsetNumTrues();
+      } else {
+        setNumTrues((Long)value);
+      }
+      break;
+
+    case NUM_FALSES:
+      if (value == null) {
+        unsetNumFalses();
+      } else {
+        setNumFalses((Long)value);
+      }
+      break;
+
+    case NUM_NULLS:
+      if (value == null) {
+        unsetNumNulls();
+      } else {
+        setNumNulls((Long)value);
+      }
+      break;
+
+    case BIT_VECTORS:
+      if (value == null) {
+        unsetBitVectors();
+      } else {
+        setBitVectors((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NUM_TRUES:
+      return getNumTrues();
+
+    case NUM_FALSES:
+      return getNumFalses();
+
+    case NUM_NULLS:
+      return getNumNulls();
+
+    case BIT_VECTORS:
+      return getBitVectors();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NUM_TRUES:
+      return isSetNumTrues();
+    case NUM_FALSES:
+      return isSetNumFalses();
+    case NUM_NULLS:
+      return isSetNumNulls();
+    case BIT_VECTORS:
+      return isSetBitVectors();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof BooleanColumnStatsData)
+      return this.equals((BooleanColumnStatsData)that);
+    return false;
+  }
+
+  public boolean equals(BooleanColumnStatsData that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_numTrues = true;
+    boolean that_present_numTrues = true;
+    if (this_present_numTrues || that_present_numTrues) {
+      if (!(this_present_numTrues && that_present_numTrues))
+        return false;
+      if (this.numTrues != that.numTrues)
+        return false;
+    }
+
+    boolean this_present_numFalses = true;
+    boolean that_present_numFalses = true;
+    if (this_present_numFalses || that_present_numFalses) {
+      if (!(this_present_numFalses && that_present_numFalses))
+        return false;
+      if (this.numFalses != that.numFalses)
+        return false;
+    }
+
+    boolean this_present_numNulls = true;
+    boolean that_present_numNulls = true;
+    if (this_present_numNulls || that_present_numNulls) {
+      if (!(this_present_numNulls && that_present_numNulls))
+        return false;
+      if (this.numNulls != that.numNulls)
+        return false;
+    }
+
+    boolean this_present_bitVectors = true && this.isSetBitVectors();
+    boolean that_present_bitVectors = true && that.isSetBitVectors();
+    if (this_present_bitVectors || that_present_bitVectors) {
+      if (!(this_present_bitVectors && that_present_bitVectors))
+        return false;
+      if (!this.bitVectors.equals(that.bitVectors))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_numTrues = true;
+    list.add(present_numTrues);
+    if (present_numTrues)
+      list.add(numTrues);
+
+    boolean present_numFalses = true;
+    list.add(present_numFalses);
+    if (present_numFalses)
+      list.add(numFalses);
+
+    boolean present_numNulls = true;
+    list.add(present_numNulls);
+    if (present_numNulls)
+      list.add(numNulls);
+
+    boolean present_bitVectors = true && (isSetBitVectors());
+    list.add(present_bitVectors);
+    if (present_bitVectors)
+      list.add(bitVectors);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(BooleanColumnStatsData other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetNumTrues()).compareTo(other.isSetNumTrues());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumTrues()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numTrues, other.numTrues);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumFalses()).compareTo(other.isSetNumFalses());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumFalses()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numFalses, other.numFalses);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(other.isSetNumNulls());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumNulls()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, other.numNulls);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetBitVectors()).compareTo(other.isSetBitVectors());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetBitVectors()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bitVectors, other.bitVectors);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("BooleanColumnStatsData(");
+    boolean first = true;
+
+    sb.append("numTrues:");
+    sb.append(this.numTrues);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("numFalses:");
+    sb.append(this.numFalses);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("numNulls:");
+    sb.append(this.numNulls);
+    first = false;
+    if (isSetBitVectors()) {
+      if (!first) sb.append(", ");
+      sb.append("bitVectors:");
+      if (this.bitVectors == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.bitVectors, sb);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetNumTrues()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numTrues' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNumFalses()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numFalses' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNumNulls()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class BooleanColumnStatsDataStandardSchemeFactory implements SchemeFactory {
+    public BooleanColumnStatsDataStandardScheme getScheme() {
+      return new BooleanColumnStatsDataStandardScheme();
+    }
+  }
+
+  private static class BooleanColumnStatsDataStandardScheme extends StandardScheme<BooleanColumnStatsData> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, BooleanColumnStatsData struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NUM_TRUES
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numTrues = iprot.readI64();
+              struct.setNumTruesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // NUM_FALSES
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numFalses = iprot.readI64();
+              struct.setNumFalsesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // NUM_NULLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numNulls = iprot.readI64();
+              struct.setNumNullsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // BIT_VECTORS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.bitVectors = iprot.readBinary();
+              struct.setBitVectorsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, BooleanColumnStatsData struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(NUM_TRUES_FIELD_DESC);
+      oprot.writeI64(struct.numTrues);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(NUM_FALSES_FIELD_DESC);
+      oprot.writeI64(struct.numFalses);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC);
+      oprot.writeI64(struct.numNulls);
+      oprot.writeFieldEnd();
+      if (struct.bitVectors != null) {
+        if (struct.isSetBitVectors()) {
+          oprot.writeFieldBegin(BIT_VECTORS_FIELD_DESC);
+          oprot.writeBinary(struct.bitVectors);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class BooleanColumnStatsDataTupleSchemeFactory implements SchemeFactory {
+    public BooleanColumnStatsDataTupleScheme getScheme() {
+      return new BooleanColumnStatsDataTupleScheme();
+    }
+  }
+
+  private static class BooleanColumnStatsDataTupleScheme extends TupleScheme<BooleanColumnStatsData> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, BooleanColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.numTrues);
+      oprot.writeI64(struct.numFalses);
+      oprot.writeI64(struct.numNulls);
+      BitSet optionals = new BitSet();
+      if (struct.isSetBitVectors()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetBitVectors()) {
+        oprot.writeBinary(struct.bitVectors);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, BooleanColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.numTrues = iprot.readI64();
+      struct.setNumTruesIsSet(true);
+      struct.numFalses = iprot.readI64();
+      struct.setNumFalsesIsSet(true);
+      struct.numNulls = iprot.readI64();
+      struct.setNumNullsIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.bitVectors = iprot.readBinary();
+        struct.setBitVectorsIsSet(true);
+      }
+    }
+  }
+
+}
+

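The bean above is a compact illustration of the generator's required/optional split: the three long counters are tracked as bits in `__isset_bitfield` and enforced by validate(), while the optional bitVectors blob is null-tracked and defensively copied by setBitVectors(byte[]). A short sketch exercising that contract, using only the constructors and methods generated above:

    import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
    import org.apache.thrift.TException;

    public class BooleanStatsDemo {
      public static void main(String[] args) throws TException {
        // The generated convenience constructor sets all three required counters.
        BooleanColumnStatsData stats = new BooleanColumnStatsData(10L, 5L, 2L);
        stats.validate(); // passes: numTrues, numFalses, numNulls are all set

        byte[] ndv = new byte[] {1, 2, 3};
        stats.setBitVectors(ndv); // stores a defensive copy of the array
        ndv[0] = 99;              // mutating the caller's array has no effect
        System.out.println(stats.getBitVectors()[0]); // prints 1, not 99

        BooleanColumnStatsData empty = new BooleanColumnStatsData();
        try {
          empty.validate(); // required fields unset, so this throws
        } catch (org.apache.thrift.protocol.TProtocolException e) {
          System.out.println("rejected: " + e.getMessage());
        }
      }
    }
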
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CacheFileMetadataRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CacheFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CacheFileMetadataRequest.java
new file mode 100644
index 0000000..5c1d82e
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CacheFileMetadataRequest.java
@@ -0,0 +1,703 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CacheFileMetadataRequest implements org.apache.thrift.TBase<CacheFileMetadataRequest, CacheFileMetadataRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CacheFileMetadataRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CacheFileMetadataRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PART_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField IS_ALL_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("isAllParts", org.apache.thrift.protocol.TType.BOOL, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CacheFileMetadataRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CacheFileMetadataRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tblName; // required
+  private String partName; // optional
+  private boolean isAllParts; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TBL_NAME((short)2, "tblName"),
+    PART_NAME((short)3, "partName"),
+    IS_ALL_PARTS((short)4, "isAllParts");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // PART_NAME
+          return PART_NAME;
+        case 4: // IS_ALL_PARTS
+          return IS_ALL_PARTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ISALLPARTS_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.PART_NAME,_Fields.IS_ALL_PARTS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PART_NAME, new org.apache.thrift.meta_data.FieldMetaData("partName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.IS_ALL_PARTS, new org.apache.thrift.meta_data.FieldMetaData("isAllParts", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CacheFileMetadataRequest.class, metaDataMap);
+  }
+
+  public CacheFileMetadataRequest() {
+  }
+
+  public CacheFileMetadataRequest(
+    String dbName,
+    String tblName)
+  {
+    this();
+    this.dbName = dbName;
+    this.tblName = tblName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CacheFileMetadataRequest(CacheFileMetadataRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTblName()) {
+      this.tblName = other.tblName;
+    }
+    if (other.isSetPartName()) {
+      this.partName = other.partName;
+    }
+    this.isAllParts = other.isAllParts;
+  }
+
+  public CacheFileMetadataRequest deepCopy() {
+    return new CacheFileMetadataRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tblName = null;
+    this.partName = null;
+    setIsAllPartsIsSet(false);
+    this.isAllParts = false;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTblName() {
+    return this.tblName;
+  }
+
+  public void setTblName(String tblName) {
+    this.tblName = tblName;
+  }
+
+  public void unsetTblName() {
+    this.tblName = null;
+  }
+
+  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblName() {
+    return this.tblName != null;
+  }
+
+  public void setTblNameIsSet(boolean value) {
+    if (!value) {
+      this.tblName = null;
+    }
+  }
+
+  public String getPartName() {
+    return this.partName;
+  }
+
+  public void setPartName(String partName) {
+    this.partName = partName;
+  }
+
+  public void unsetPartName() {
+    this.partName = null;
+  }
+
+  /** Returns true if field partName is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartName() {
+    return this.partName != null;
+  }
+
+  public void setPartNameIsSet(boolean value) {
+    if (!value) {
+      this.partName = null;
+    }
+  }
+
+  public boolean isIsAllParts() {
+    return this.isAllParts;
+  }
+
+  public void setIsAllParts(boolean isAllParts) {
+    this.isAllParts = isAllParts;
+    setIsAllPartsIsSet(true);
+  }
+
+  public void unsetIsAllParts() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISALLPARTS_ISSET_ID);
+  }
+
+  /** Returns true if field isAllParts is set (has been assigned a value) and false otherwise */
+  public boolean isSetIsAllParts() {
+    return EncodingUtils.testBit(__isset_bitfield, __ISALLPARTS_ISSET_ID);
+  }
+
+  public void setIsAllPartsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISALLPARTS_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTblName();
+      } else {
+        setTblName((String)value);
+      }
+      break;
+
+    case PART_NAME:
+      if (value == null) {
+        unsetPartName();
+      } else {
+        setPartName((String)value);
+      }
+      break;
+
+    case IS_ALL_PARTS:
+      if (value == null) {
+        unsetIsAllParts();
+      } else {
+        setIsAllParts((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TBL_NAME:
+      return getTblName();
+
+    case PART_NAME:
+      return getPartName();
+
+    case IS_ALL_PARTS:
+      return isIsAllParts();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TBL_NAME:
+      return isSetTblName();
+    case PART_NAME:
+      return isSetPartName();
+    case IS_ALL_PARTS:
+      return isSetIsAllParts();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CacheFileMetadataRequest)
+      return this.equals((CacheFileMetadataRequest)that);
+    return false;
+  }
+
+  public boolean equals(CacheFileMetadataRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tblName = true && this.isSetTblName();
+    boolean that_present_tblName = true && that.isSetTblName();
+    if (this_present_tblName || that_present_tblName) {
+      if (!(this_present_tblName && that_present_tblName))
+        return false;
+      if (!this.tblName.equals(that.tblName))
+        return false;
+    }
+
+    boolean this_present_partName = true && this.isSetPartName();
+    boolean that_present_partName = true && that.isSetPartName();
+    if (this_present_partName || that_present_partName) {
+      if (!(this_present_partName && that_present_partName))
+        return false;
+      if (!this.partName.equals(that.partName))
+        return false;
+    }
+
+    boolean this_present_isAllParts = true && this.isSetIsAllParts();
+    boolean that_present_isAllParts = true && that.isSetIsAllParts();
+    if (this_present_isAllParts || that_present_isAllParts) {
+      if (!(this_present_isAllParts && that_present_isAllParts))
+        return false;
+      if (this.isAllParts != that.isAllParts)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tblName = true && (isSetTblName());
+    list.add(present_tblName);
+    if (present_tblName)
+      list.add(tblName);
+
+    boolean present_partName = true && (isSetPartName());
+    list.add(present_partName);
+    if (present_partName)
+      list.add(partName);
+
+    boolean present_isAllParts = true && (isSetIsAllParts());
+    list.add(present_isAllParts);
+    if (present_isAllParts)
+      list.add(isAllParts);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CacheFileMetadataRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartName()).compareTo(other.isSetPartName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partName, other.partName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetIsAllParts()).compareTo(other.isSetIsAllParts());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIsAllParts()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isAllParts, other.isAllParts);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CacheFileMetadataRequest(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tblName:");
+    if (this.tblName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tblName);
+    }
+    first = false;
+    if (isSetPartName()) {
+      if (!first) sb.append(", ");
+      sb.append("partName:");
+      if (this.partName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partName);
+      }
+      first = false;
+    }
+    if (isSetIsAllParts()) {
+      if (!first) sb.append(", ");
+      sb.append("isAllParts:");
+      sb.append(this.isAllParts);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTblName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CacheFileMetadataRequestStandardSchemeFactory implements SchemeFactory {
+    public CacheFileMetadataRequestStandardScheme getScheme() {
+      return new CacheFileMetadataRequestStandardScheme();
+    }
+  }
+
+  private static class CacheFileMetadataRequestStandardScheme extends StandardScheme<CacheFileMetadataRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CacheFileMetadataRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tblName = iprot.readString();
+              struct.setTblNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PART_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.partName = iprot.readString();
+              struct.setPartNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // IS_ALL_PARTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.isAllParts = iprot.readBool();
+              struct.setIsAllPartsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CacheFileMetadataRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblName != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tblName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.partName != null) {
+        if (struct.isSetPartName()) {
+          oprot.writeFieldBegin(PART_NAME_FIELD_DESC);
+          oprot.writeString(struct.partName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetIsAllParts()) {
+        oprot.writeFieldBegin(IS_ALL_PARTS_FIELD_DESC);
+        oprot.writeBool(struct.isAllParts);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CacheFileMetadataRequestTupleSchemeFactory implements SchemeFactory {
+    public CacheFileMetadataRequestTupleScheme getScheme() {
+      return new CacheFileMetadataRequestTupleScheme();
+    }
+  }
+
+  private static class CacheFileMetadataRequestTupleScheme extends TupleScheme<CacheFileMetadataRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CacheFileMetadataRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tblName);
+      BitSet optionals = new BitSet();
+      if (struct.isSetPartName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetIsAllParts()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetPartName()) {
+        oprot.writeString(struct.partName);
+      }
+      if (struct.isSetIsAllParts()) {
+        oprot.writeBool(struct.isAllParts);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CacheFileMetadataRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tblName = iprot.readString();
+      struct.setTblNameIsSet(true);
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.partName = iprot.readString();
+        struct.setPartNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.isAllParts = iprot.readBool();
+        struct.setIsAllPartsIsSet(true);
+      }
+    }
+  }
+
+}
+

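CacheFileMetadataRequest shows the tuple encoding for optionals end to end: the TupleScheme writes dbName and tblName unconditionally, then a two-bit presence BitSet, then only the optional fields whose bits are set (partName is skipped below). A minimal sketch driving that path over an in-memory transport; TTupleProtocol.getScheme() is what routes read()/write() to the TupleScheme above:

    import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TTupleProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class TupleEncodingDemo {
      public static void main(String[] args) throws TException {
        CacheFileMetadataRequest req = new CacheFileMetadataRequest("db1", "tbl1");
        req.setIsAllParts(true); // optional: sets bit 1 of the presence BitSet

        TMemoryBuffer buf = new TMemoryBuffer(64);
        req.write(new TTupleProtocol(buf)); // dense tuple encoding, no field headers

        CacheFileMetadataRequest copy = new CacheFileMetadataRequest();
        copy.read(new TTupleProtocol(buf));
        // prints CacheFileMetadataRequest(dbName:db1, tblName:tbl1, isAllParts:true)
        System.out.println(copy);
      }
    }
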
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CacheFileMetadataResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CacheFileMetadataResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CacheFileMetadataResult.java
new file mode 100644
index 0000000..d4302b3
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CacheFileMetadataResult.java
@@ -0,0 +1,387 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CacheFileMetadataResult implements org.apache.thrift.TBase<CacheFileMetadataResult, CacheFileMetadataResult._Fields>, java.io.Serializable, Cloneable, Comparable<CacheFileMetadataResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CacheFileMetadataResult");
+
+  private static final org.apache.thrift.protocol.TField IS_SUPPORTED_FIELD_DESC = new org.apache.thrift.protocol.TField("isSupported", org.apache.thrift.protocol.TType.BOOL, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CacheFileMetadataResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CacheFileMetadataResultTupleSchemeFactory());
+  }
+
+  private boolean isSupported; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    IS_SUPPORTED((short)1, "isSupported");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // IS_SUPPORTED
+          return IS_SUPPORTED;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ISSUPPORTED_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.IS_SUPPORTED, new org.apache.thrift.meta_data.FieldMetaData("isSupported", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CacheFileMetadataResult.class, metaDataMap);
+  }
+
+  public CacheFileMetadataResult() {
+  }
+
+  public CacheFileMetadataResult(
+    boolean isSupported)
+  {
+    this();
+    this.isSupported = isSupported;
+    setIsSupportedIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CacheFileMetadataResult(CacheFileMetadataResult other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.isSupported = other.isSupported;
+  }
+
+  public CacheFileMetadataResult deepCopy() {
+    return new CacheFileMetadataResult(this);
+  }
+
+  @Override
+  public void clear() {
+    setIsSupportedIsSet(false);
+    this.isSupported = false;
+  }
+
+  public boolean isIsSupported() {
+    return this.isSupported;
+  }
+
+  public void setIsSupported(boolean isSupported) {
+    this.isSupported = isSupported;
+    setIsSupportedIsSet(true);
+  }
+
+  public void unsetIsSupported() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID);
+  }
+
+  /** Returns true if field isSupported is set (has been assigned a value) and false otherwise */
+  public boolean isSetIsSupported() {
+    return EncodingUtils.testBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID);
+  }
+
+  public void setIsSupportedIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case IS_SUPPORTED:
+      if (value == null) {
+        unsetIsSupported();
+      } else {
+        setIsSupported((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case IS_SUPPORTED:
+      return isIsSupported();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case IS_SUPPORTED:
+      return isSetIsSupported();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CacheFileMetadataResult)
+      return this.equals((CacheFileMetadataResult)that);
+    return false;
+  }
+
+  public boolean equals(CacheFileMetadataResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_isSupported = true;
+    boolean that_present_isSupported = true;
+    if (this_present_isSupported || that_present_isSupported) {
+      if (!(this_present_isSupported && that_present_isSupported))
+        return false;
+      if (this.isSupported != that.isSupported)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_isSupported = true;
+    list.add(present_isSupported);
+    if (present_isSupported)
+      list.add(isSupported);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CacheFileMetadataResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetIsSupported()).compareTo(other.isSetIsSupported());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIsSupported()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isSupported, other.isSupported);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CacheFileMetadataResult(");
+    boolean first = true;
+
+    sb.append("isSupported:");
+    sb.append(this.isSupported);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetIsSupported()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'isSupported' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CacheFileMetadataResultStandardSchemeFactory implements SchemeFactory {
+    public CacheFileMetadataResultStandardScheme getScheme() {
+      return new CacheFileMetadataResultStandardScheme();
+    }
+  }
+
+  private static class CacheFileMetadataResultStandardScheme extends StandardScheme<CacheFileMetadataResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CacheFileMetadataResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // IS_SUPPORTED
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.isSupported = iprot.readBool();
+              struct.setIsSupportedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CacheFileMetadataResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(IS_SUPPORTED_FIELD_DESC);
+      oprot.writeBool(struct.isSupported);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CacheFileMetadataResultTupleSchemeFactory implements SchemeFactory {
+    public CacheFileMetadataResultTupleScheme getScheme() {
+      return new CacheFileMetadataResultTupleScheme();
+    }
+  }
+
+  private static class CacheFileMetadataResultTupleScheme extends TupleScheme<CacheFileMetadataResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CacheFileMetadataResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeBool(struct.isSupported);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CacheFileMetadataResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.isSupported = iprot.readBool();
+      struct.setIsSupportedIsSet(true);
+    }
+  }
+
+}
+
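
A minimal usage sketch for the generated CacheFileMetadataResult above, assuming libthrift 0.9.3 on the classpath; the struct, its constructor, and its methods come straight from the generated code in the diff, while the wrapper class name and sample value are hypothetical. It round-trips an instance through TCompactProtocol, the same protocol the private writeObject/readObject helpers use:

import org.apache.hadoop.hive.metastore.api.CacheFileMetadataResult;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class CacheFileMetadataResultRoundTrip {
  public static void main(String[] args) throws Exception {
    // isSupported is the struct's only field, and validate() treats it as required.
    CacheFileMetadataResult original = new CacheFileMetadataResult(true);

    // Serialize to bytes with the compact protocol.
    byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(original);

    // Deserialize into a fresh instance; read() ends with a validate() call that
    // throws if isSupported was never assigned.
    CacheFileMetadataResult copy = new CacheFileMetadataResult();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

    System.out.println(copy.isIsSupported()); // prints: true
  }
}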

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java
new file mode 100644
index 0000000..3eb4dbd
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Catalog.java
@@ -0,0 +1,606 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Catalog implements org.apache.thrift.TBase<Catalog, Catalog._Fields>, java.io.Serializable, Cloneable, Comparable<Catalog> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Catalog");
+
+  private static final org.apache.thrift.protocol.TField NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("name", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField DESCRIPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("description", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField LOCATION_URI_FIELD_DESC = new org.apache.thrift.protocol.TField("locationUri", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CatalogStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CatalogTupleSchemeFactory());
+  }
+
+  private String name; // required
+  private String description; // optional
+  private String locationUri; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NAME((short)1, "name"),
+    DESCRIPTION((short)2, "description"),
+    LOCATION_URI((short)3, "locationUri");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NAME
+          return NAME;
+        case 2: // DESCRIPTION
+          return DESCRIPTION;
+        case 3: // LOCATION_URI
+          return LOCATION_URI;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.DESCRIPTION};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NAME, new org.apache.thrift.meta_data.FieldMetaData("name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DESCRIPTION, new org.apache.thrift.meta_data.FieldMetaData("description", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.LOCATION_URI, new org.apache.thrift.meta_data.FieldMetaData("locationUri", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Catalog.class, metaDataMap);
+  }
+
+  public Catalog() {
+  }
+
+  public Catalog(
+    String name,
+    String locationUri)
+  {
+    this();
+    this.name = name;
+    this.locationUri = locationUri;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public Catalog(Catalog other) {
+    if (other.isSetName()) {
+      this.name = other.name;
+    }
+    if (other.isSetDescription()) {
+      this.description = other.description;
+    }
+    if (other.isSetLocationUri()) {
+      this.locationUri = other.locationUri;
+    }
+  }
+
+  public Catalog deepCopy() {
+    return new Catalog(this);
+  }
+
+  @Override
+  public void clear() {
+    this.name = null;
+    this.description = null;
+    this.locationUri = null;
+  }
+
+  public String getName() {
+    return this.name;
+  }
+
+  public void setName(String name) {
+    this.name = name;
+  }
+
+  public void unsetName() {
+    this.name = null;
+  }
+
+  /** Returns true if field name is set (has been assigned a value) and false otherwise */
+  public boolean isSetName() {
+    return this.name != null;
+  }
+
+  public void setNameIsSet(boolean value) {
+    if (!value) {
+      this.name = null;
+    }
+  }
+
+  public String getDescription() {
+    return this.description;
+  }
+
+  public void setDescription(String description) {
+    this.description = description;
+  }
+
+  public void unsetDescription() {
+    this.description = null;
+  }
+
+  /** Returns true if field description is set (has been assigned a value) and false otherwise */
+  public boolean isSetDescription() {
+    return this.description != null;
+  }
+
+  public void setDescriptionIsSet(boolean value) {
+    if (!value) {
+      this.description = null;
+    }
+  }
+
+  public String getLocationUri() {
+    return this.locationUri;
+  }
+
+  public void setLocationUri(String locationUri) {
+    this.locationUri = locationUri;
+  }
+
+  public void unsetLocationUri() {
+    this.locationUri = null;
+  }
+
+  /** Returns true if field locationUri is set (has been assigned a value) and false otherwise */
+  public boolean isSetLocationUri() {
+    return this.locationUri != null;
+  }
+
+  public void setLocationUriIsSet(boolean value) {
+    if (!value) {
+      this.locationUri = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NAME:
+      if (value == null) {
+        unsetName();
+      } else {
+        setName((String)value);
+      }
+      break;
+
+    case DESCRIPTION:
+      if (value == null) {
+        unsetDescription();
+      } else {
+        setDescription((String)value);
+      }
+      break;
+
+    case LOCATION_URI:
+      if (value == null) {
+        unsetLocationUri();
+      } else {
+        setLocationUri((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NAME:
+      return getName();
+
+    case DESCRIPTION:
+      return getDescription();
+
+    case LOCATION_URI:
+      return getLocationUri();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NAME:
+      return isSetName();
+    case DESCRIPTION:
+      return isSetDescription();
+    case LOCATION_URI:
+      return isSetLocationUri();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof Catalog)
+      return this.equals((Catalog)that);
+    return false;
+  }
+
+  public boolean equals(Catalog that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_name = true && this.isSetName();
+    boolean that_present_name = true && that.isSetName();
+    if (this_present_name || that_present_name) {
+      if (!(this_present_name && that_present_name))
+        return false;
+      if (!this.name.equals(that.name))
+        return false;
+    }
+
+    boolean this_present_description = true && this.isSetDescription();
+    boolean that_present_description = true && that.isSetDescription();
+    if (this_present_description || that_present_description) {
+      if (!(this_present_description && that_present_description))
+        return false;
+      if (!this.description.equals(that.description))
+        return false;
+    }
+
+    boolean this_present_locationUri = true && this.isSetLocationUri();
+    boolean that_present_locationUri = true && that.isSetLocationUri();
+    if (this_present_locationUri || that_present_locationUri) {
+      if (!(this_present_locationUri && that_present_locationUri))
+        return false;
+      if (!this.locationUri.equals(that.locationUri))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_name = true && (isSetName());
+    list.add(present_name);
+    if (present_name)
+      list.add(name);
+
+    boolean present_description = true && (isSetDescription());
+    list.add(present_description);
+    if (present_description)
+      list.add(description);
+
+    boolean present_locationUri = true && (isSetLocationUri());
+    list.add(present_locationUri);
+    if (present_locationUri)
+      list.add(locationUri);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(Catalog other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetName()).compareTo(other.isSetName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.name, other.name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDescription()).compareTo(other.isSetDescription());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDescription()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.description, other.description);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetLocationUri()).compareTo(other.isSetLocationUri());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLocationUri()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.locationUri, other.locationUri);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Catalog(");
+    boolean first = true;
+
+    sb.append("name:");
+    if (this.name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.name);
+    }
+    first = false;
+    if (isSetDescription()) {
+      if (!first) sb.append(", ");
+      sb.append("description:");
+      if (this.description == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.description);
+      }
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("locationUri:");
+    if (this.locationUri == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.locationUri);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CatalogStandardSchemeFactory implements SchemeFactory {
+    public CatalogStandardScheme getScheme() {
+      return new CatalogStandardScheme();
+    }
+  }
+
+  private static class CatalogStandardScheme extends StandardScheme<Catalog> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Catalog struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.name = iprot.readString();
+              struct.setNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DESCRIPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.description = iprot.readString();
+              struct.setDescriptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // LOCATION_URI
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.locationUri = iprot.readString();
+              struct.setLocationUriIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Catalog struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.name != null) {
+        oprot.writeFieldBegin(NAME_FIELD_DESC);
+        oprot.writeString(struct.name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.description != null) {
+        if (struct.isSetDescription()) {
+          oprot.writeFieldBegin(DESCRIPTION_FIELD_DESC);
+          oprot.writeString(struct.description);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.locationUri != null) {
+        oprot.writeFieldBegin(LOCATION_URI_FIELD_DESC);
+        oprot.writeString(struct.locationUri);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CatalogTupleSchemeFactory implements SchemeFactory {
+    public CatalogTupleScheme getScheme() {
+      return new CatalogTupleScheme();
+    }
+  }
+
+  private static class CatalogTupleScheme extends TupleScheme<Catalog> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, Catalog struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetDescription()) {
+        optionals.set(1);
+      }
+      if (struct.isSetLocationUri()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetName()) {
+        oprot.writeString(struct.name);
+      }
+      if (struct.isSetDescription()) {
+        oprot.writeString(struct.description);
+      }
+      if (struct.isSetLocationUri()) {
+        oprot.writeString(struct.locationUri);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, Catalog struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.name = iprot.readString();
+        struct.setNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.description = iprot.readString();
+        struct.setDescriptionIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.locationUri = iprot.readString();
+        struct.setLocationUriIsSet(true);
+      }
+    }
+  }
+
+}
+
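
An illustrative usage sketch for the generated Catalog struct above, again assuming libthrift 0.9.3; the field semantics follow the metadata map in the diff (name and locationUri have DEFAULT requirement, description is OPTIONAL), and the class name and sample values are hypothetical:

import org.apache.hadoop.hive.metastore.api.Catalog;

public class CatalogExample {
  public static void main(String[] args) {
    // The generated convenience constructor takes the two non-optional fields.
    Catalog catalog = new Catalog("spark", "hdfs://nn:8020/warehouse/spark");

    // description is OPTIONAL: the standard-scheme write() above only emits it
    // when isSetDescription() is true, and toString() skips it when unset.
    System.out.println(catalog.isSetDescription()); // prints: false
    catalog.setDescription("catalog for Spark-managed tables");
    System.out.println(catalog.isSetDescription()); // prints: true

    System.out.println(catalog);
    // prints: Catalog(name:spark, description:catalog for Spark-managed tables,
    //         locationUri:hdfs://nn:8020/warehouse/spark)
  }
}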


[59/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
index 0000000,2e7ac5a..add7f99
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/postgres/hive-schema-4.0.0.postgres.sql
@@@ -1,0 -1,1858 +1,1860 @@@
+ --
+ -- PostgreSQL database dump
+ --
+ 
+ SET statement_timeout = 0;
+ SET client_encoding = 'UTF8';
+ SET standard_conforming_strings = off;
+ SET check_function_bodies = false;
+ SET client_min_messages = warning;
+ SET escape_string_warning = off;
+ 
+ SET search_path = public, pg_catalog;
+ 
+ SET default_tablespace = '';
+ 
+ SET default_with_oids = false;
+ 
+ --
+ -- Name: BUCKETING_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "BUCKETING_COLS" (
+     "SD_ID" bigint NOT NULL,
+     "BUCKET_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: CDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "CDS" (
+     "CD_ID" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: COLUMNS_V2; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "COLUMNS_V2" (
+     "CD_ID" bigint NOT NULL,
+     "COMMENT" character varying(4000),
+     "COLUMN_NAME" character varying(767) NOT NULL,
+     "TYPE_NAME" text,
+     "INTEGER_IDX" integer NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "DATABASE_PARAMS" (
+     "DB_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(180) NOT NULL,
+     "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ 
+ CREATE TABLE "CTLGS" (
+     "CTLG_ID" BIGINT PRIMARY KEY,
+     "NAME" VARCHAR(256) UNIQUE,
+     "DESC" VARCHAR(4000),
+     "LOCATION_URI" VARCHAR(4000) NOT NULL
+ );
+ 
+ --
+ -- Name: DBS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "DBS" (
+     "DB_ID" bigint NOT NULL,
+     "DESC" character varying(4000) DEFAULT NULL::character varying,
+     "DB_LOCATION_URI" character varying(4000) NOT NULL,
+     "NAME" character varying(128) DEFAULT NULL::character varying,
+     "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+     "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
+     "CTLG_NAME" varchar(256)
+ );
+ 
+ 
+ --
+ -- Name: DB_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "DB_PRIVS" (
+     "DB_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "DB_ID" bigint,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "DB_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: GLOBAL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "GLOBAL_PRIVS" (
+     "USER_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "USER_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: IDXS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "IDXS" (
+     "INDEX_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "DEFERRED_REBUILD" boolean NOT NULL,
+     "INDEX_HANDLER_CLASS" character varying(4000) DEFAULT NULL::character varying,
+     "INDEX_NAME" character varying(128) DEFAULT NULL::character varying,
+     "INDEX_TBL_ID" bigint,
+     "LAST_ACCESS_TIME" bigint NOT NULL,
+     "ORIG_TBL_ID" bigint,
+     "SD_ID" bigint
+ );
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "INDEX_PARAMS" (
+     "INDEX_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: NUCLEUS_TABLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "NUCLEUS_TABLES" (
+     "CLASS_NAME" character varying(128) NOT NULL,
+     "TABLE_NAME" character varying(128) NOT NULL,
+     "TYPE" character varying(4) NOT NULL,
+     "OWNER" character varying(2) NOT NULL,
+     "VERSION" character varying(20) NOT NULL,
+     "INTERFACE_NAME" character varying(255) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: PARTITIONS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITIONS" (
+     "PART_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "LAST_ACCESS_TIME" bigint NOT NULL,
+     "PART_NAME" character varying(767) DEFAULT NULL::character varying,
+     "SD_ID" bigint,
 -    "TBL_ID" bigint
++    "TBL_ID" bigint,
++    "WRITE_ID" bigint DEFAULT 0
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_EVENTS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_EVENTS" (
+     "PART_NAME_ID" bigint NOT NULL,
+     "CAT_NAME" character varying(256),
+     "DB_NAME" character varying(128),
+     "EVENT_TIME" bigint NOT NULL,
+     "EVENT_TYPE" integer NOT NULL,
+     "PARTITION_NAME" character varying(767),
+     "TBL_NAME" character varying(256)
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_KEYS" (
+     "TBL_ID" bigint NOT NULL,
+     "PKEY_COMMENT" character varying(4000) DEFAULT NULL::character varying,
+     "PKEY_NAME" character varying(128) NOT NULL,
+     "PKEY_TYPE" character varying(767) NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_KEY_VALS" (
+     "PART_ID" bigint NOT NULL,
+     "PART_KEY_VAL" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PARTITION_PARAMS" (
+     "PART_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PART_COL_PRIVS" (
+     "PART_COLUMN_GRANT_ID" bigint NOT NULL,
+     "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_ID" bigint,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: PART_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PART_PRIVS" (
+     "PART_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_ID" bigint,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PART_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: ROLES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "ROLES" (
+     "ROLE_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "OWNER_NAME" character varying(128) DEFAULT NULL::character varying,
+     "ROLE_NAME" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: ROLE_MAP; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "ROLE_MAP" (
+     "ROLE_GRANT_ID" bigint NOT NULL,
+     "ADD_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "ROLE_ID" bigint
+ );
+ 
+ 
+ --
+ -- Name: SDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SDS" (
+     "SD_ID" bigint NOT NULL,
+     "INPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+     "IS_COMPRESSED" boolean NOT NULL,
+     "LOCATION" character varying(4000) DEFAULT NULL::character varying,
+     "NUM_BUCKETS" bigint NOT NULL,
+     "OUTPUT_FORMAT" character varying(4000) DEFAULT NULL::character varying,
+     "SERDE_ID" bigint,
+     "CD_ID" bigint,
+     "IS_STOREDASSUBDIRECTORIES" boolean NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: SD_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SD_PARAMS" (
+     "SD_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" text DEFAULT NULL
+ );
+ 
+ 
+ --
+ -- Name: SEQUENCE_TABLE; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SEQUENCE_TABLE" (
+     "SEQUENCE_NAME" character varying(255) NOT NULL,
+     "NEXT_VAL" bigint NOT NULL
+ );
+ 
+ INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
+ --
+ -- Name: SERDES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SERDES" (
+     "SERDE_ID" bigint NOT NULL,
+     "NAME" character varying(128) DEFAULT NULL::character varying,
+     "SLIB" character varying(4000) DEFAULT NULL::character varying,
+     "DESCRIPTION" varchar(4000),
+     "SERIALIZER_CLASS" varchar(4000),
+     "DESERIALIZER_CLASS" varchar(4000),
+     "SERDE_TYPE" integer
+ );
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SERDE_PARAMS" (
+     "SERDE_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" text DEFAULT NULL
+ );
+ 
+ 
+ --
+ -- Name: SORT_COLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "SORT_COLS" (
+     "SD_ID" bigint NOT NULL,
+     "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+     "ORDER" bigint NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TABLE_PARAMS" (
+     "TBL_ID" bigint NOT NULL,
+     "PARAM_KEY" character varying(256) NOT NULL,
+     "PARAM_VALUE" text DEFAULT NULL
+ );
+ 
+ 
+ --
+ -- Name: TBLS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TBLS" (
+     "TBL_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "DB_ID" bigint,
+     "LAST_ACCESS_TIME" bigint NOT NULL,
+     "OWNER" character varying(767) DEFAULT NULL::character varying,
+     "OWNER_TYPE" character varying(10) DEFAULT NULL::character varying,
+     "RETENTION" bigint NOT NULL,
+     "SD_ID" bigint,
+     "TBL_NAME" character varying(256) DEFAULT NULL::character varying,
+     "TBL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "VIEW_EXPANDED_TEXT" text,
+     "VIEW_ORIGINAL_TEXT" text,
 -    "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false
++    "IS_REWRITE_ENABLED" boolean NOT NULL DEFAULT false,
++    "WRITE_ID" bigint DEFAULT 0
+ );
+ 
+ --
+ -- Name: MV_CREATION_METADATA; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "MV_CREATION_METADATA" (
+     "MV_CREATION_METADATA_ID" bigint NOT NULL,
+     "CAT_NAME" character varying(256) NOT NULL,
+     "DB_NAME" character varying(128) NOT NULL,
+     "TBL_NAME" character varying(256) NOT NULL,
+     "TXN_LIST" text
+ );
+ 
+ --
+ -- Name: MV_TABLES_USED; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "MV_TABLES_USED" (
+     "MV_CREATION_METADATA_ID" bigint NOT NULL,
+     "TBL_ID" bigint NOT NULL
+ );
+ 
+ --
+ -- Name: TBL_COL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TBL_COL_PRIVS" (
+     "TBL_COLUMN_GRANT_ID" bigint NOT NULL,
+     "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "TBL_COL_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "TBL_ID" bigint,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: TBL_PRIVS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TBL_PRIVS" (
+     "TBL_GRANT_ID" bigint NOT NULL,
+     "CREATE_TIME" bigint NOT NULL,
+     "GRANT_OPTION" smallint NOT NULL,
+     "GRANTOR" character varying(128) DEFAULT NULL::character varying,
+     "GRANTOR_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_NAME" character varying(128) DEFAULT NULL::character varying,
+     "PRINCIPAL_TYPE" character varying(128) DEFAULT NULL::character varying,
+     "TBL_PRIV" character varying(128) DEFAULT NULL::character varying,
+     "TBL_ID" bigint,
+     "AUTHORIZER" character varying(128) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: TYPES; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TYPES" (
+     "TYPES_ID" bigint NOT NULL,
+     "TYPE_NAME" character varying(128) DEFAULT NULL::character varying,
+     "TYPE1" character varying(767) DEFAULT NULL::character varying,
+     "TYPE2" character varying(767) DEFAULT NULL::character varying
+ );
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "TYPE_FIELDS" (
+     "TYPE_NAME" bigint NOT NULL,
+     "COMMENT" character varying(256) DEFAULT NULL::character varying,
+     "FIELD_NAME" character varying(128) NOT NULL,
+     "FIELD_TYPE" character varying(767) NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_STRING_LIST" (
+     "STRING_LIST_ID" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_STRING_LIST_VALUES" (
+     "STRING_LIST_ID" bigint NOT NULL,
+     "STRING_LIST_VALUE" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_COL_NAMES" (
+     "SD_ID" bigint NOT NULL,
+     "SKEWED_COL_NAME" character varying(256) DEFAULT NULL::character varying,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "SKEWED_COL_VALUE_LOC_MAP" (
+     "SD_ID" bigint NOT NULL,
+     "STRING_LIST_ID_KID" bigint NOT NULL,
+     "LOCATION" character varying(4000) DEFAULT NULL::character varying
+ );
+ 
+ CREATE TABLE "SKEWED_VALUES" (
+     "SD_ID_OID" bigint NOT NULL,
+     "STRING_LIST_ID_EID" bigint NOT NULL,
+     "INTEGER_IDX" bigint NOT NULL
+ );
+ 
+ 
+ --
+ -- Name: TAB_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE  "MASTER_KEYS"
+ (
+     "KEY_ID" SERIAL,
+     "MASTER_KEY" varchar(767) NULL,
+     PRIMARY KEY ("KEY_ID")
+ );
+ 
+ CREATE TABLE  "DELEGATION_TOKENS"
+ (
+     "TOKEN_IDENT" varchar(767) NOT NULL,
+     "TOKEN" varchar(767) NULL,
+     PRIMARY KEY ("TOKEN_IDENT")
+ );
+ 
+ CREATE TABLE "TAB_COL_STATS" (
+  "CS_ID" bigint NOT NULL,
+  "CAT_NAME" character varying(256) DEFAULT NULL::character varying,
+  "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+  "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+  "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+  "TBL_ID" bigint NOT NULL,
+  "LONG_LOW_VALUE" bigint,
+  "LONG_HIGH_VALUE" bigint,
+  "DOUBLE_LOW_VALUE" double precision,
+  "DOUBLE_HIGH_VALUE" double precision,
+  "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "NUM_NULLS" bigint NOT NULL,
+  "NUM_DISTINCTS" bigint,
+  "BIT_VECTOR" bytea,
+  "AVG_COL_LEN" double precision,
+  "MAX_COL_LEN" bigint,
+  "NUM_TRUES" bigint,
+  "NUM_FALSES" bigint,
+  "LAST_ANALYZED" bigint NOT NULL
+ );
+ 
+ --
+ -- Table structure for VERSION
+ --
+ CREATE TABLE "VERSION" (
+   "VER_ID" bigint,
+   "SCHEMA_VERSION" character varying(127) NOT NULL,
+   "VERSION_COMMENT" character varying(255) NOT NULL
+ );
+ 
+ --
+ -- Name: PART_COL_STATS; Type: TABLE; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE TABLE "PART_COL_STATS" (
+  "CS_ID" bigint NOT NULL,
+  "CAT_NAME" character varying(256) DEFAULT NULL::character varying,
+  "DB_NAME" character varying(128) DEFAULT NULL::character varying,
+  "TABLE_NAME" character varying(256) DEFAULT NULL::character varying,
+  "PARTITION_NAME" character varying(767) DEFAULT NULL::character varying,
+  "COLUMN_NAME" character varying(767) DEFAULT NULL::character varying,
+  "COLUMN_TYPE" character varying(128) DEFAULT NULL::character varying,
+  "PART_ID" bigint NOT NULL,
+  "LONG_LOW_VALUE" bigint,
+  "LONG_HIGH_VALUE" bigint,
+  "DOUBLE_LOW_VALUE" double precision,
+  "DOUBLE_HIGH_VALUE" double precision,
+  "BIG_DECIMAL_LOW_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "BIG_DECIMAL_HIGH_VALUE" character varying(4000) DEFAULT NULL::character varying,
+  "NUM_NULLS" bigint NOT NULL,
+  "NUM_DISTINCTS" bigint,
+  "BIT_VECTOR" bytea,
+  "AVG_COL_LEN" double precision,
+  "MAX_COL_LEN" bigint,
+  "NUM_TRUES" bigint,
+  "NUM_FALSES" bigint,
+  "LAST_ANALYZED" bigint NOT NULL
+ );
+ 
+ --
+ -- Table structure for FUNCS
+ --
+ CREATE TABLE "FUNCS" (
+   "FUNC_ID" BIGINT NOT NULL,
+   "CLASS_NAME" VARCHAR(4000),
+   "CREATE_TIME" INTEGER NOT NULL,
+   "DB_ID" BIGINT,
+   "FUNC_NAME" VARCHAR(128),
+   "FUNC_TYPE" INTEGER NOT NULL,
+   "OWNER_NAME" VARCHAR(128),
+   "OWNER_TYPE" VARCHAR(10),
+   PRIMARY KEY ("FUNC_ID")
+ );
+ 
+ --
+ -- Table structure for FUNC_RU
+ --
+ CREATE TABLE "FUNC_RU" (
+   "FUNC_ID" BIGINT NOT NULL,
+   "RESOURCE_TYPE" INTEGER NOT NULL,
+   "RESOURCE_URI" VARCHAR(4000),
+   "INTEGER_IDX" INTEGER NOT NULL,
+   PRIMARY KEY ("FUNC_ID", "INTEGER_IDX")
+ );
+ 
+ CREATE TABLE "NOTIFICATION_LOG"
+ (
+     "NL_ID" BIGINT NOT NULL,
+     "EVENT_ID" BIGINT NOT NULL,
+     "EVENT_TIME" INTEGER NOT NULL,
+     "EVENT_TYPE" VARCHAR(32) NOT NULL,
+     "CAT_NAME" VARCHAR(256),
+     "DB_NAME" VARCHAR(128),
+     "TBL_NAME" VARCHAR(256),
+     "MESSAGE" text,
+     "MESSAGE_FORMAT" VARCHAR(16),
+     PRIMARY KEY ("NL_ID")
+ );
+ 
+ CREATE TABLE "NOTIFICATION_SEQUENCE"
+ (
+     "NNI_ID" BIGINT NOT NULL,
+     "NEXT_EVENT_ID" BIGINT NOT NULL,
+     PRIMARY KEY ("NNI_ID")
+ );
+ 
+ INSERT INTO "NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT 1,1 WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "NOTIFICATION_SEQUENCE");
+ 
+ CREATE TABLE "KEY_CONSTRAINTS"
+ (
+   "CHILD_CD_ID" BIGINT,
+   "CHILD_INTEGER_IDX" BIGINT,
+   "CHILD_TBL_ID" BIGINT,
+   "PARENT_CD_ID" BIGINT,
+   "PARENT_INTEGER_IDX" BIGINT NOT NULL,
+   "PARENT_TBL_ID" BIGINT NOT NULL,
+   "POSITION" BIGINT NOT NULL,
+   "CONSTRAINT_NAME" VARCHAR(400) NOT NULL,
+   "CONSTRAINT_TYPE" SMALLINT NOT NULL,
+   "UPDATE_RULE" SMALLINT,
+   "DELETE_RULE"	SMALLINT,
+   "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL,
+   "DEFAULT_VALUE" VARCHAR(400),
+   PRIMARY KEY ("CONSTRAINT_NAME", "POSITION")
+ ) ;
+ 
+ ---
+ --- Table structure for METASTORE_DB_PROPERTIES
+ ---
+ CREATE TABLE "METASTORE_DB_PROPERTIES"
+ (
+   "PROPERTY_KEY" VARCHAR(255) NOT NULL,
+   "PROPERTY_VALUE" VARCHAR(1000) NOT NULL,
+   "DESCRIPTION" VARCHAR(1000)
+ );
+ 
+ 
+ CREATE TABLE "WM_RESOURCEPLAN" (
+     "RP_ID" bigint NOT NULL,
+     "NAME" character varying(128) NOT NULL,
+     "QUERY_PARALLELISM" integer,
+     "STATUS" character varying(20) NOT NULL,
+     "DEFAULT_POOL_ID" bigint
+ );
+ 
+ CREATE TABLE "WM_POOL" (
+     "POOL_ID" bigint NOT NULL,
+     "RP_ID" bigint NOT NULL,
+     "PATH" character varying(1024) NOT NULL,
+     "ALLOC_FRACTION" double precision,
+     "QUERY_PARALLELISM" integer,
+     "SCHEDULING_POLICY" character varying(1024)
+ );
+ 
+ CREATE TABLE "WM_TRIGGER" (
+     "TRIGGER_ID" bigint NOT NULL,
+     "RP_ID" bigint NOT NULL,
+     "NAME" character varying(128) NOT NULL,
+     "TRIGGER_EXPRESSION" character varying(1024) DEFAULT NULL::character varying,
+     "ACTION_EXPRESSION" character varying(1024) DEFAULT NULL::character varying,
+     "IS_IN_UNMANAGED" smallint NOT NULL DEFAULT 0
+ );
+ 
+ CREATE TABLE "WM_POOL_TO_TRIGGER" (
+     "POOL_ID" bigint NOT NULL,
+     "TRIGGER_ID" bigint NOT NULL
+ );
+ 
+ CREATE TABLE "WM_MAPPING" (
+     "MAPPING_ID" bigint NOT NULL,
+     "RP_ID" bigint NOT NULL,
+     "ENTITY_TYPE" character varying(128) NOT NULL,
+     "ENTITY_NAME" character varying(128) NOT NULL,
+     "POOL_ID" bigint,
+     "ORDERING" integer
+ );
+ 
+ --
+ -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "BUCKETING_COLS"
+     ADD CONSTRAINT "BUCKETING_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ 
+ --
+ -- Name: CDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "CDS"
+     ADD CONSTRAINT "CDS_pkey" PRIMARY KEY ("CD_ID");
+ 
+ 
+ --
+ -- Name: COLUMNS_V2_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "COLUMNS_V2"
+     ADD CONSTRAINT "COLUMNS_V2_pkey" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DATABASE_PARAMS"
+     ADD CONSTRAINT "DATABASE_PARAMS_pkey" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: DBPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DB_PRIVS"
+     ADD CONSTRAINT "DBPRIVILEGEINDEX" UNIQUE ("AUTHORIZER", "DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: DBS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DBS"
+     ADD CONSTRAINT "DBS_pkey" PRIMARY KEY ("DB_ID");
+ 
+ 
+ --
+ -- Name: DB_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DB_PRIVS"
+     ADD CONSTRAINT "DB_PRIVS_pkey" PRIMARY KEY ("DB_GRANT_ID");
+ 
+ 
+ --
+ -- Name: GLOBALPRIVILEGEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "GLOBAL_PRIVS"
+     ADD CONSTRAINT "GLOBALPRIVILEGEINDEX" UNIQUE ("AUTHORIZER", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: GLOBAL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "GLOBAL_PRIVS"
+     ADD CONSTRAINT "GLOBAL_PRIVS_pkey" PRIMARY KEY ("USER_GRANT_ID");
+ 
+ 
+ --
+ -- Name: IDXS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_pkey" PRIMARY KEY ("INDEX_ID");
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "INDEX_PARAMS"
+     ADD CONSTRAINT "INDEX_PARAMS_pkey" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: NUCLEUS_TABLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "NUCLEUS_TABLES"
+     ADD CONSTRAINT "NUCLEUS_TABLES_pkey" PRIMARY KEY ("CLASS_NAME");
+ 
+ 
+ --
+ -- Name: PARTITIONS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "PARTITIONS_pkey" PRIMARY KEY ("PART_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_EVENTS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_EVENTS"
+     ADD CONSTRAINT "PARTITION_EVENTS_pkey" PRIMARY KEY ("PART_NAME_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEYS"
+     ADD CONSTRAINT "PARTITION_KEYS_pkey" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEY_VALS"
+     ADD CONSTRAINT "PARTITION_KEY_VALS_pkey" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_PARAMS"
+     ADD CONSTRAINT "PARTITION_PARAMS_pkey" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PART_COL_PRIVS"
+     ADD CONSTRAINT "PART_COL_PRIVS_pkey" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+ 
+ 
+ --
+ -- Name: PART_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PART_PRIVS"
+     ADD CONSTRAINT "PART_PRIVS_pkey" PRIMARY KEY ("PART_GRANT_ID");
+ 
+ 
+ --
+ -- Name: ROLEENTITYINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLES"
+     ADD CONSTRAINT "ROLEENTITYINDEX" UNIQUE ("ROLE_NAME");
+ 
+ 
+ --
+ -- Name: ROLES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLES"
+     ADD CONSTRAINT "ROLES_pkey" PRIMARY KEY ("ROLE_ID");
+ 
+ 
+ --
+ -- Name: ROLE_MAP_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLE_MAP"
+     ADD CONSTRAINT "ROLE_MAP_pkey" PRIMARY KEY ("ROLE_GRANT_ID");
+ 
+ 
+ --
+ -- Name: SDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SDS"
+     ADD CONSTRAINT "SDS_pkey" PRIMARY KEY ("SD_ID");
+ 
+ 
+ --
+ -- Name: SD_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SD_PARAMS"
+     ADD CONSTRAINT "SD_PARAMS_pkey" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: SEQUENCE_TABLE_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SEQUENCE_TABLE"
+     ADD CONSTRAINT "SEQUENCE_TABLE_pkey" PRIMARY KEY ("SEQUENCE_NAME");
+ 
+ 
+ --
+ -- Name: SERDES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SERDES"
+     ADD CONSTRAINT "SERDES_pkey" PRIMARY KEY ("SERDE_ID");
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SERDE_PARAMS"
+     ADD CONSTRAINT "SERDE_PARAMS_pkey" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: SORT_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "SORT_COLS"
+     ADD CONSTRAINT "SORT_COLS_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TABLE_PARAMS"
+     ADD CONSTRAINT "TABLE_PARAMS_pkey" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+ 
+ 
+ --
+ -- Name: TBLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "TBLS_pkey" PRIMARY KEY ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TBL_COL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBL_COL_PRIVS"
+     ADD CONSTRAINT "TBL_COL_PRIVS_pkey" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+ 
+ 
+ --
+ -- Name: TBL_PRIVS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBL_PRIVS"
+     ADD CONSTRAINT "TBL_PRIVS_pkey" PRIMARY KEY ("TBL_GRANT_ID");
+ 
+ 
+ --
+ -- Name: TYPES_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TYPES"
+     ADD CONSTRAINT "TYPES_pkey" PRIMARY KEY ("TYPES_ID");
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TYPE_FIELDS"
+     ADD CONSTRAINT "TYPE_FIELDS_pkey" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+ 
+ ALTER TABLE ONLY "SKEWED_STRING_LIST"
+     ADD CONSTRAINT "SKEWED_STRING_LIST_pkey" PRIMARY KEY ("STRING_LIST_ID");
+ 
+ ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+     ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_pkey" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+ 
+ 
+ ALTER TABLE ONLY "SKEWED_COL_NAMES"
+     ADD CONSTRAINT "SKEWED_COL_NAMES_pkey" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+     ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_pkey" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+ 
+ ALTER TABLE ONLY "SKEWED_VALUES"
+     ADD CONSTRAINT "SKEWED_VALUES_pkey" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+ 
+ --
+ -- Name: TAB_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+ 
+ --
+ -- Name: PART_COL_STATS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_pkey" PRIMARY KEY("CS_ID");
+ 
+ --
+ -- Name: UNIQUEINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "UNIQUEINDEX" UNIQUE ("INDEX_NAME", "ORIG_TBL_ID");
+ 
+ 
+ --
+ -- Name: UNIQUEPARTITION; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "UNIQUEPARTITION" UNIQUE ("PART_NAME", "TBL_ID");
+ 
+ 
+ --
+ -- Name: UNIQUETABLE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "UNIQUETABLE" UNIQUE ("TBL_NAME", "DB_ID");
+ 
+ 
+ --
+ -- Name: UNIQUE_DATABASE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "DBS"
+     ADD CONSTRAINT "UNIQUE_DATABASE" UNIQUE ("NAME", "CTLG_NAME");
+ 
+ 
+ --
+ -- Name: UNIQUE_TYPE; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "TYPES"
+     ADD CONSTRAINT "UNIQUE_TYPE" UNIQUE ("TYPE_NAME");
+ 
+ 
+ --
+ -- Name: USERROLEMAPINDEX; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ ALTER TABLE ONLY "ROLE_MAP"
+     ADD CONSTRAINT "USERROLEMAPINDEX" UNIQUE ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+ 
+ ALTER TABLE ONLY "METASTORE_DB_PROPERTIES"
+     ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
+ 
+ 
+ -- Resource plan: Primary key and unique key constraints.
+ ALTER TABLE ONLY "WM_RESOURCEPLAN"
+     ADD CONSTRAINT "WM_RESOURCEPLAN_pkey" PRIMARY KEY ("RP_ID");
+ 
+ ALTER TABLE ONLY "WM_RESOURCEPLAN"
+     ADD CONSTRAINT "UNIQUE_WM_RESOURCEPLAN" UNIQUE ("NAME");
+ 
+ ALTER TABLE ONLY "WM_POOL"
+     ADD CONSTRAINT "WM_POOL_pkey" PRIMARY KEY ("POOL_ID");
+ 
+ ALTER TABLE ONLY "WM_POOL"
+     ADD CONSTRAINT "UNIQUE_WM_POOL" UNIQUE ("RP_ID", "PATH");
+ 
+ ALTER TABLE ONLY "WM_TRIGGER"
+     ADD CONSTRAINT "WM_TRIGGER_pkey" PRIMARY KEY ("TRIGGER_ID");
+ 
+ ALTER TABLE ONLY "WM_TRIGGER"
+     ADD CONSTRAINT "UNIQUE_WM_TRIGGER" UNIQUE ("RP_ID", "NAME");
+ 
+ ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+     ADD CONSTRAINT "WM_POOL_TO_TRIGGER_pkey" PRIMARY KEY ("POOL_ID", "TRIGGER_ID");
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "WM_MAPPING_pkey" PRIMARY KEY ("MAPPING_ID");
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "UNIQUE_WM_MAPPING" UNIQUE ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+ 
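-- Illustrative sketch (not part of the schema script): the constraints above
-- tie the workload-management tables into a resource-plan -> pool -> trigger
-- hierarchy; a query of this shape walks it, using only the key and unique
-- columns named in those constraints.
SELECT rp."NAME" AS plan_name, p."PATH" AS pool_path, t."NAME" AS trigger_name
  FROM "WM_RESOURCEPLAN" rp
  JOIN "WM_POOL" p ON p."RP_ID" = rp."RP_ID"
  JOIN "WM_POOL_TO_TRIGGER" pt ON pt."POOL_ID" = p."POOL_ID"
  JOIN "WM_TRIGGER" t ON t."TRIGGER_ID" = pt."TRIGGER_ID";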
+ --
+ -- Name: BUCKETING_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "BUCKETING_COLS_N49" ON "BUCKETING_COLS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "DATABASE_PARAMS_N49" ON "DATABASE_PARAMS" USING btree ("DB_ID");
+ 
+ 
+ --
+ -- Name: DB_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "DB_PRIVS_N49" ON "DB_PRIVS" USING btree ("DB_ID");
+ 
+ 
+ --
+ -- Name: IDXS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "IDXS_N49" ON "IDXS" USING btree ("ORIG_TBL_ID");
+ 
+ 
+ --
+ -- Name: IDXS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "IDXS_N50" ON "IDXS" USING btree ("INDEX_TBL_ID");
+ 
+ 
+ --
+ -- Name: IDXS_N51; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "IDXS_N51" ON "IDXS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "INDEX_PARAMS_N49" ON "INDEX_PARAMS" USING btree ("INDEX_ID");
+ 
+ 
+ --
+ -- Name: PARTITIONCOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONCOLUMNPRIVILEGEINDEX" ON "PART_COL_PRIVS" USING btree ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: PARTITIONEVENTINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONEVENTINDEX" ON "PARTITION_EVENTS" USING btree ("PARTITION_NAME");
+ 
+ 
+ --
+ -- Name: PARTITIONS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONS_N49" ON "PARTITIONS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: PARTITIONS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITIONS_N50" ON "PARTITIONS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITION_KEYS_N49" ON "PARTITION_KEYS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITION_KEY_VALS_N49" ON "PARTITION_KEY_VALS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTITION_PARAMS_N49" ON "PARTITION_PARAMS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PARTPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PARTPRIVILEGEINDEX" ON "PART_PRIVS" USING btree ("AUTHORIZER", "PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PART_COL_PRIVS_N49" ON "PART_COL_PRIVS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PART_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID");
+ 
+ 
+ --
+ -- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+ 
+ 
+ --
+ -- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "ROLE_MAP_N49" ON "ROLE_MAP" USING btree ("ROLE_ID");
+ 
+ 
+ --
+ -- Name: SDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SDS_N49" ON "SDS" USING btree ("SERDE_ID");
+ 
+ 
+ --
+ -- Name: SD_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SD_PARAMS_N49" ON "SD_PARAMS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SERDE_PARAMS_N49" ON "SERDE_PARAMS" USING btree ("SERDE_ID");
+ 
+ 
+ --
+ -- Name: SORT_COLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "SORT_COLS_N49" ON "SORT_COLS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: TABLECOLUMNPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TABLECOLUMNPRIVILEGEINDEX" ON "TBL_COL_PRIVS" USING btree ("AUTHORIZER", "TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: TABLEPRIVILEGEINDEX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TABLEPRIVILEGEINDEX" ON "TBL_PRIVS" USING btree ("AUTHORIZER", "TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TABLE_PARAMS_N49" ON "TABLE_PARAMS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TBLS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBLS_N49" ON "TBLS" USING btree ("DB_ID");
+ 
+ 
+ --
+ -- Name: TBLS_N50; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBLS_N50" ON "TBLS" USING btree ("SD_ID");
+ 
+ 
+ --
+ -- Name: TBL_COL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBL_COL_PRIVS_N49" ON "TBL_COL_PRIVS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TBL_PRIVS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TBL_PRIVS_N49" ON "TBL_PRIVS" USING btree ("TBL_ID");
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TYPE_FIELDS_N49" ON "TYPE_FIELDS" USING btree ("TYPE_NAME");
+ 
+ --
+ -- Name: TAB_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TAB_COL_STATS_N49" ON "TAB_COL_STATS" USING btree ("TBL_ID");
+ 
+ --
+ -- Name: TAB_COL_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "TAB_COL_STATS_IDX" ON "TAB_COL_STATS" USING btree ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME");
+ 
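-- Illustrative sketch (not part of the schema script): TAB_COL_STATS_IDX
-- above serves per-column statistics lookups of this shape; the catalog,
-- database, table and column names here are made up.
SELECT * FROM "TAB_COL_STATS"
 WHERE "CAT_NAME" = 'hive' AND "DB_NAME" = 'default'
   AND "TABLE_NAME" = 't1' AND "COLUMN_NAME" = 'c1';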
+ 
+ --
+ -- Name: PART_COL_STATS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "PART_COL_STATS_N49" ON "PART_COL_STATS" USING btree ("PART_ID");
+ 
+ --
+ -- Name: UNIQUEFUNCTION; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE UNIQUE INDEX "UNIQUEFUNCTION" ON "FUNCS" ("FUNC_NAME", "DB_ID");
+ 
+ --
+ -- Name: FUNCS_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "FUNCS_N49" ON "FUNCS" ("DB_ID");
+ 
+ --
+ -- Name: FUNC_RU_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace:
+ --
+ 
+ CREATE INDEX "FUNC_RU_N49" ON "FUNC_RU" ("FUNC_ID");
+ 
+ CREATE INDEX "CONSTRAINTS_PARENT_TBLID_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("PARENT_TBL_ID");
+ 
+ CREATE INDEX "CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "KEY_CONSTRAINTS" USING BTREE ("CONSTRAINT_TYPE");
+ 
+ ALTER TABLE ONLY "SKEWED_STRING_LIST_VALUES"
+     ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_fkey" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+ 
+ 
+ ALTER TABLE ONLY "SKEWED_COL_NAMES"
+     ADD CONSTRAINT "SKEWED_COL_NAMES_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+     ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey1" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "SKEWED_COL_VALUE_LOC_MAP"
+     ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_fkey2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "SKEWED_VALUES"
+     ADD CONSTRAINT "SKEWED_VALUES_fkey1" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "SKEWED_STRING_LIST"("STRING_LIST_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "SKEWED_VALUES"
+     ADD CONSTRAINT "SKEWED_VALUES_fkey2" FOREIGN KEY ("SD_ID_OID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
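-- The foreign keys in this script are all declared DEFERRABLE. A minimal,
-- self-contained sketch with toy tables (not part of this schema) of what
-- that allows: inside a transaction that defers the check, a child row may
-- be inserted before its parent, and the constraint is verified at COMMIT.
CREATE TABLE demo_parent (id bigint PRIMARY KEY);
CREATE TABLE demo_child  (id bigint REFERENCES demo_parent (id) DEFERRABLE);

BEGIN;
SET CONSTRAINTS ALL DEFERRED;
INSERT INTO demo_child  VALUES (1);  -- parent row does not exist yet
INSERT INTO demo_parent VALUES (1);  -- FK is satisfied before commit
COMMIT;                              -- deferred check runs here and passes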
+ 
+ --
+ -- Name: BUCKETING_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "BUCKETING_COLS"
+     ADD CONSTRAINT "BUCKETING_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: COLUMNS_V2_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "COLUMNS_V2"
+     ADD CONSTRAINT "COLUMNS_V2_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: DATABASE_PARAMS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "DATABASE_PARAMS"
+     ADD CONSTRAINT "DATABASE_PARAMS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: DB_PRIVS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "DB_PRIVS"
+     ADD CONSTRAINT "DB_PRIVS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: IDXS_INDEX_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_INDEX_TBL_ID_fkey" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: IDXS_ORIG_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_ORIG_TBL_ID_fkey" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: IDXS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "IDXS"
+     ADD CONSTRAINT "IDXS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: INDEX_PARAMS_INDEX_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "INDEX_PARAMS"
+     ADD CONSTRAINT "INDEX_PARAMS_INDEX_ID_fkey" FOREIGN KEY ("INDEX_ID") REFERENCES "IDXS"("INDEX_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITIONS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "PARTITIONS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITIONS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITIONS"
+     ADD CONSTRAINT "PARTITIONS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITION_KEYS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEYS"
+     ADD CONSTRAINT "PARTITION_KEYS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITION_KEY_VALS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_KEY_VALS"
+     ADD CONSTRAINT "PARTITION_KEY_VALS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PARTITION_PARAMS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PARTITION_PARAMS"
+     ADD CONSTRAINT "PARTITION_PARAMS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PART_COL_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PART_COL_PRIVS"
+     ADD CONSTRAINT "PART_COL_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PART_PRIVS_PART_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "PART_PRIVS"
+     ADD CONSTRAINT "PART_PRIVS_PART_ID_fkey" FOREIGN KEY ("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: ROLE_MAP_ROLE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "ROLE_MAP"
+     ADD CONSTRAINT "ROLE_MAP_ROLE_ID_fkey" FOREIGN KEY ("ROLE_ID") REFERENCES "ROLES"("ROLE_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SDS_CD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SDS"
+     ADD CONSTRAINT "SDS_CD_ID_fkey" FOREIGN KEY ("CD_ID") REFERENCES "CDS"("CD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SDS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SDS"
+     ADD CONSTRAINT "SDS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SD_PARAMS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SD_PARAMS"
+     ADD CONSTRAINT "SD_PARAMS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SERDE_PARAMS_SERDE_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SERDE_PARAMS"
+     ADD CONSTRAINT "SERDE_PARAMS_SERDE_ID_fkey" FOREIGN KEY ("SERDE_ID") REFERENCES "SERDES"("SERDE_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: SORT_COLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "SORT_COLS"
+     ADD CONSTRAINT "SORT_COLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TABLE_PARAMS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TABLE_PARAMS"
+     ADD CONSTRAINT "TABLE_PARAMS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBLS_DB_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "TBLS_DB_ID_fkey" FOREIGN KEY ("DB_ID") REFERENCES "DBS"("DB_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBLS_SD_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBLS"
+     ADD CONSTRAINT "TBLS_SD_ID_fkey" FOREIGN KEY ("SD_ID") REFERENCES "SDS"("SD_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBL_COL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBL_COL_PRIVS"
+     ADD CONSTRAINT "TBL_COL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TBL_PRIVS_TBL_ID_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TBL_PRIVS"
+     ADD CONSTRAINT "TBL_PRIVS_TBL_ID_fkey" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: TYPE_FIELDS_TYPE_NAME_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ 
+ ALTER TABLE ONLY "TYPE_FIELDS"
+     ADD CONSTRAINT "TYPE_FIELDS_TYPE_NAME_fkey" FOREIGN KEY ("TYPE_NAME") REFERENCES "TYPES"("TYPES_ID") DEFERRABLE;
+ 
+ --
+ -- Name: TAB_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ ALTER TABLE ONLY "TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_fkey" FOREIGN KEY("TBL_ID") REFERENCES "TBLS"("TBL_ID") DEFERRABLE;
+ 
+ 
+ --
+ -- Name: PART_COL_STATS_fkey; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ --
+ ALTER TABLE ONLY "PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_fkey" FOREIGN KEY("PART_ID") REFERENCES "PARTITIONS"("PART_ID") DEFERRABLE;
+ 
+ ALTER TABLE "DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "CTLGS" ("NAME");
+ 
+ ALTER TABLE ONLY "VERSION" ADD CONSTRAINT "VERSION_pkey" PRIMARY KEY ("VER_ID");
+ 
+ -- Name: FUNCS_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ ALTER TABLE ONLY "FUNCS"
+     ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "DBS" ("DB_ID") DEFERRABLE;
+ 
+ -- Name: FUNC_RU_FK1; Type: FK CONSTRAINT; Schema: public; Owner: hiveuser
+ ALTER TABLE ONLY "FUNC_RU"
+     ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "FUNCS" ("FUNC_ID") DEFERRABLE;
+ 
+ -- Resource plan FK constraints.
+ 
+ ALTER TABLE ONLY "WM_POOL"
+     ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_RESOURCEPLAN"
+     ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_TRIGGER"
+     ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+     ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_POOL_TO_TRIGGER"
+     ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "WM_TRIGGER" ("TRIGGER_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "WM_RESOURCEPLAN" ("RP_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "WM_MAPPING"
+     ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "WM_POOL" ("POOL_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "MV_CREATION_METADATA"
+     ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID");
+ 
+ CREATE INDEX "MV_UNIQUE_TABLE"
+     ON "MV_CREATION_METADATA" USING btree ("TBL_NAME", "DB_NAME");
+ 
+ ALTER TABLE ONLY "MV_TABLES_USED"
+     ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") DEFERRABLE;
+ 
+ ALTER TABLE ONLY "MV_TABLES_USED"
+     ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "TBLS" ("TBL_ID") DEFERRABLE;
+ 
+ --
+ -- Name: public; Type: ACL; Schema: -; Owner: hiveuser
+ --
+ 
+ REVOKE ALL ON SCHEMA public FROM PUBLIC;
+ GRANT ALL ON SCHEMA public TO PUBLIC;
+ 
+ --
+ -- PostgreSQL database dump complete
+ --
+ 
+ ------------------------------
+ -- Transaction and lock tables
+ ------------------------------
+ CREATE TABLE TXNS (
+   TXN_ID bigint PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED bigint NOT NULL,
+   TXN_LAST_HEARTBEAT bigint NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar(128),
+   TXN_META_INFO varchar(128),
+   TXN_HEARTBEAT_COUNT integer,
+   TXN_TYPE integer
+ );
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID),
+   TC_DATABASE varchar(128) NOT NULL,
+   TC_TABLE varchar(128),
+   TC_PARTITION varchar(767) DEFAULT NULL,
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID bigint
+ );
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS USING hash (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID bigint NOT NULL,
+   CTC_DATABASE varchar(128) NOT NULL,
+   CTC_TABLE varchar(256),
+   CTC_PARTITION varchar(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID bigint
+ );
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_INDEX ON COMPLETED_TXN_COMPONENTS USING btree (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_TXN_ID VALUES(1);
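
-- Sketch (not part of the schema): NEXT_TXN_ID is a single-row counter. The
-- metastore typically reserves a block of n new transaction ids by reading
-- the row under a row lock and advancing it, along these lines (n = 5):
BEGIN;
SELECT NTXN_NEXT FROM NEXT_TXN_ID FOR UPDATE;      -- serializes concurrent allocators
UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 5;  -- ids NTXN_NEXT .. NTXN_NEXT+4 reserved
COMMIT;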
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID bigint NOT NULL,
+   HL_LOCK_INT_ID bigint NOT NULL,
+   HL_TXNID bigint NOT NULL,
+   HL_DB varchar(128) NOT NULL,
+   HL_TABLE varchar(128),
+   HL_PARTITION varchar(767) DEFAULT NULL,
+   HL_LOCK_STATE char(1) NOT NULL,
+   HL_LOCK_TYPE char(1) NOT NULL,
+   HL_LAST_HEARTBEAT bigint NOT NULL,
+   HL_ACQUIRED_AT bigint,
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT integer,
+   HL_AGENT_INFO varchar(128),
+   HL_BLOCKEDBY_EXT_ID bigint,
+   HL_BLOCKEDBY_INT_ID bigint,
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+ );
+ 
+ CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS USING hash (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID bigint PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START bigint,
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID bigint,
+   CQ_META_INFO bytea,
+   CQ_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID bigint PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START bigint,
+   CC_END bigint,
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID bigint,
+   CC_META_INFO bytea,
+   CC_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT varchar(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ );
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar(128) NOT NULL,
+   WS_TABLE varchar(128) NOT NULL,
+   WS_PARTITION varchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE varchar(128) NOT NULL,
+   T2W_TABLE varchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
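
-- Sketch (not part of the schema): the two unique indexes above serve the
-- txn -> write id and write id -> txn lookups; the first direction looks
-- like this (the database, table and txn id values are illustrative).
SELECT T2W_WRITEID
  FROM TXN_TO_WRITE_ID
 WHERE T2W_DATABASE = 'default' AND T2W_TABLE = 't1' AND T2W_TXNID = 42;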
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE varchar(128) NOT NULL,
+   NWI_TABLE varchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
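
-- Sketch (not part of the schema): MIN_HISTORY_LEVEL tracks, per open
-- transaction, the lowest txn id that was open when it started; a global
-- low-water mark for cleanup can then be derived with a query of this
-- shape, served by the index above.
SELECT MIN(MHL_MIN_OPEN_TXNID) FROM MIN_HISTORY_LEVEL;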
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" bigint primary key,
+   "SCHEMA_TYPE" integer not null,
+   "NAME" varchar(256) unique,
+   "DB_ID" bigint references "DBS" ("DB_ID"),
+   "COMPATIBILITY" integer not null,
+   "VALIDATION_LEVEL" integer not null,
+   "CAN_EVOLVE" boolean not null,
+   "SCHEMA_GROUP" varchar(256),
+   "DESCRIPTION" varchar(4000)
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" bigint primary key,
+   "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" integer not null,
+   "CREATED_AT" bigint not null,
+   "CD_ID" bigint references "CDS" ("CD_ID"), 
+   "STATE" integer not null,
+   "DESCRIPTION" varchar(4000),
+   "SCHEMA_TEXT" text,
+   "FINGERPRINT" varchar(256),
+   "SCHEMA_VERSION_NAME" varchar(256),
+   "SERDE_ID" bigint references "SERDES" ("SERDE_ID"), 
+   unique ("SCHEMA_ID", "VERSION")
+ );
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ 
+ CREATE TABLE RUNTIME_STATS (
+  RS_ID bigint primary key,
+  CREATE_TIME bigint NOT NULL,
+  WEIGHT bigint NOT NULL,
+  PAYLOAD bytea
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE "TXN_WRITE_NOTIFICATION_LOG" (
+   "WNL_ID" bigint NOT NULL,
+   "WNL_TXNID" bigint NOT NULL,
+   "WNL_WRITEID" bigint NOT NULL,
+   "WNL_DATABASE" varchar(128) NOT NULL,
+   "WNL_TABLE" varchar(128) NOT NULL,
+   "WNL_PARTITION" varchar(1024) NOT NULL,
+   "WNL_TABLE_OBJ" text NOT NULL,
+   "WNL_PARTITION_OBJ" text,
+   "WNL_FILES" text,
+   "WNL_EVENT_TIME" integer NOT NULL,
+   PRIMARY KEY ("WNL_TXNID", "WNL_DATABASE", "WNL_TABLE", "WNL_PARTITION")
+ );
+ 
+ INSERT INTO "SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO "VERSION" ("VER_ID", "SCHEMA_VERSION", "VERSION_COMMENT") VALUES (1, '4.0.0', 'Hive release version 4.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
index 0000000,40d2795..eff08b3
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/postgres/upgrade-3.1.0-to-4.0.0.postgres.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0';
+ 
++-- HIVE-19416
++ALTER TABLE "TBLS" ADD "WRITE_ID" bigint;
++ALTER TABLE "PARTITIONS" ADD "WRITE_ID" bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE "VERSION" SET "SCHEMA_VERSION"='4.0.0', "VERSION_COMMENT"='Hive release version 4.0.0' where "VER_ID"=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0';
+ 
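-- Sketch (not part of the commit): after running this upgrade, the recorded
-- schema version can be verified, and the new WRITE_ID columns should be
-- present on "TBLS" and "PARTITIONS":
SELECT "SCHEMA_VERSION", "VERSION_COMMENT" FROM "VERSION" WHERE "VER_ID" = 1;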


[87/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
index 0000000,51f809a..3778498
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
@@@ -1,0 -1,1335 +1,1535 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Partition implements org.apache.thrift.TBase<Partition, Partition._Fields>, java.io.Serializable, Cloneable, Comparable<Partition> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Partition");
+ 
+   private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1);
+   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+   private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)3);
+   private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)4);
+   private static final org.apache.thrift.protocol.TField LAST_ACCESS_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("lastAccessTime", org.apache.thrift.protocol.TType.I32, (short)5);
+   private static final org.apache.thrift.protocol.TField SD_FIELD_DESC = new org.apache.thrift.protocol.TField("sd", org.apache.thrift.protocol.TType.STRUCT, (short)6);
+   private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)7);
+   private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)8);
+   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)9);
++  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)10);
++  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)11);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new PartitionStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new PartitionTupleSchemeFactory());
+   }
+ 
+   private List<String> values; // required
+   private String dbName; // required
+   private String tableName; // required
+   private int createTime; // required
+   private int lastAccessTime; // required
+   private StorageDescriptor sd; // required
+   private Map<String,String> parameters; // required
+   private PrincipalPrivilegeSet privileges; // optional
+   private String catName; // optional
++  private long writeId; // optional
++  private boolean isStatsCompliant; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     VALUES((short)1, "values"),
+     DB_NAME((short)2, "dbName"),
+     TABLE_NAME((short)3, "tableName"),
+     CREATE_TIME((short)4, "createTime"),
+     LAST_ACCESS_TIME((short)5, "lastAccessTime"),
+     SD((short)6, "sd"),
+     PARAMETERS((short)7, "parameters"),
+     PRIVILEGES((short)8, "privileges"),
 -    CAT_NAME((short)9, "catName");
++    CAT_NAME((short)9, "catName"),
++    WRITE_ID((short)10, "writeId"),
++    IS_STATS_COMPLIANT((short)11, "isStatsCompliant");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // VALUES
+           return VALUES;
+         case 2: // DB_NAME
+           return DB_NAME;
+         case 3: // TABLE_NAME
+           return TABLE_NAME;
+         case 4: // CREATE_TIME
+           return CREATE_TIME;
+         case 5: // LAST_ACCESS_TIME
+           return LAST_ACCESS_TIME;
+         case 6: // SD
+           return SD;
+         case 7: // PARAMETERS
+           return PARAMETERS;
+         case 8: // PRIVILEGES
+           return PRIVILEGES;
+         case 9: // CAT_NAME
+           return CAT_NAME;
++        case 10: // WRITE_ID
++          return WRITE_ID;
++        case 11: // IS_STATS_COMPLIANT
++          return IS_STATS_COMPLIANT;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
+   private static final int __CREATETIME_ISSET_ID = 0;
+   private static final int __LASTACCESSTIME_ISSET_ID = 1;
++  private static final int __WRITEID_ISSET_ID = 2;
++  private static final int __ISSTATSCOMPLIANT_ISSET_ID = 3;
+   private byte __isset_bitfield = 0;
 -  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME};
++  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.CAT_NAME,_Fields.WRITE_ID,_Fields.IS_STATS_COMPLIANT};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+     tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+     tmpMap.put(_Fields.LAST_ACCESS_TIME, new org.apache.thrift.meta_data.FieldMetaData("lastAccessTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+     tmpMap.put(_Fields.SD, new org.apache.thrift.meta_data.FieldMetaData("sd", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StorageDescriptor.class)));
+     tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+     tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
+     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Partition.class, metaDataMap);
+   }
+ 
+   public Partition() {
++    this.writeId = -1L;
++
+   }
+ 
+   public Partition(
+     List<String> values,
+     String dbName,
+     String tableName,
+     int createTime,
+     int lastAccessTime,
+     StorageDescriptor sd,
+     Map<String,String> parameters)
+   {
+     this();
+     this.values = values;
+     this.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(dbName);
+     this.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(tableName);
+     this.createTime = createTime;
+     setCreateTimeIsSet(true);
+     this.lastAccessTime = lastAccessTime;
+     setLastAccessTimeIsSet(true);
+     this.sd = sd;
+     this.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(parameters);
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public Partition(Partition other) {
+     __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetValues()) {
+       List<String> __this__values = new ArrayList<String>(other.values);
+       this.values = __this__values;
+     }
+     if (other.isSetDbName()) {
+       this.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.dbName);
+     }
+     if (other.isSetTableName()) {
+       this.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.tableName);
+     }
+     this.createTime = other.createTime;
+     this.lastAccessTime = other.lastAccessTime;
+     if (other.isSetSd()) {
+       this.sd = new StorageDescriptor(other.sd);
+     }
+     if (other.isSetParameters()) {
+       Map<String,String> __this__parameters = new HashMap<String,String>(other.parameters);
+       this.parameters = __this__parameters;
+     }
+     if (other.isSetPrivileges()) {
+       this.privileges = new PrincipalPrivilegeSet(other.privileges);
+     }
+     if (other.isSetCatName()) {
+       this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(other.catName);
+     }
++    this.writeId = other.writeId;
++    this.isStatsCompliant = other.isStatsCompliant;
+   }
+ 
+   public Partition deepCopy() {
+     return new Partition(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.values = null;
+     this.dbName = null;
+     this.tableName = null;
+     setCreateTimeIsSet(false);
+     this.createTime = 0;
+     setLastAccessTimeIsSet(false);
+     this.lastAccessTime = 0;
+     this.sd = null;
+     this.parameters = null;
+     this.privileges = null;
+     this.catName = null;
++    this.writeId = -1L;
++
++    setIsStatsCompliantIsSet(false);
++    this.isStatsCompliant = false;
+   }
+ 
+   public int getValuesSize() {
+     return (this.values == null) ? 0 : this.values.size();
+   }
+ 
+   public java.util.Iterator<String> getValuesIterator() {
+     return (this.values == null) ? null : this.values.iterator();
+   }
+ 
+   public void addToValues(String elem) {
+     if (this.values == null) {
+       this.values = new ArrayList<String>();
+     }
+     this.values.add(elem);
+   }
+ 
+   public List<String> getValues() {
+     return this.values;
+   }
+ 
+   public void setValues(List<String> values) {
+     this.values = values;
+   }
+ 
+   public void unsetValues() {
+     this.values = null;
+   }
+ 
+   /** Returns true if field values is set (has been assigned a value) and false otherwise */
+   public boolean isSetValues() {
+     return this.values != null;
+   }
+ 
+   public void setValuesIsSet(boolean value) {
+     if (!value) {
+       this.values = null;
+     }
+   }
+ 
+   public String getDbName() {
+     return this.dbName;
+   }
+ 
+   public void setDbName(String dbName) {
+     this.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(dbName);
+   }
+ 
+   public void unsetDbName() {
+     this.dbName = null;
+   }
+ 
+   /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+   public boolean isSetDbName() {
+     return this.dbName != null;
+   }
+ 
+   public void setDbNameIsSet(boolean value) {
+     if (!value) {
+       this.dbName = null;
+     }
+   }
+ 
+   public String getTableName() {
+     return this.tableName;
+   }
+ 
+   public void setTableName(String tableName) {
+     this.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(tableName);
+   }
+ 
+   public void unsetTableName() {
+     this.tableName = null;
+   }
+ 
+   /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+   public boolean isSetTableName() {
+     return this.tableName != null;
+   }
+ 
+   public void setTableNameIsSet(boolean value) {
+     if (!value) {
+       this.tableName = null;
+     }
+   }
+ 
+   public int getCreateTime() {
+     return this.createTime;
+   }
+ 
+   public void setCreateTime(int createTime) {
+     this.createTime = createTime;
+     setCreateTimeIsSet(true);
+   }
+ 
+   public void unsetCreateTime() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+   }
+ 
+   /** Returns true if field createTime is set (has been assigned a value) and false otherwise */
+   public boolean isSetCreateTime() {
+     return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+   }
+ 
+   public void setCreateTimeIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
+   }
+ 
+   public int getLastAccessTime() {
+     return this.lastAccessTime;
+   }
+ 
+   public void setLastAccessTime(int lastAccessTime) {
+     this.lastAccessTime = lastAccessTime;
+     setLastAccessTimeIsSet(true);
+   }
+ 
+   public void unsetLastAccessTime() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID);
+   }
+ 
+   /** Returns true if field lastAccessTime is set (has been assigned a value) and false otherwise */
+   public boolean isSetLastAccessTime() {
+     return EncodingUtils.testBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID);
+   }
+ 
+   public void setLastAccessTimeIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID, value);
+   }
+ 
+   public StorageDescriptor getSd() {
+     return this.sd;
+   }
+ 
+   public void setSd(StorageDescriptor sd) {
+     this.sd = sd;
+   }
+ 
+   public void unsetSd() {
+     this.sd = null;
+   }
+ 
+   /** Returns true if field sd is set (has been assigned a value) and false otherwise */
+   public boolean isSetSd() {
+     return this.sd != null;
+   }
+ 
+   public void setSdIsSet(boolean value) {
+     if (!value) {
+       this.sd = null;
+     }
+   }
+ 
+   public int getParametersSize() {
+     return (this.parameters == null) ? 0 : this.parameters.size();
+   }
+ 
+   public void putToParameters(String key, String val) {
+     if (this.parameters == null) {
+       this.parameters = new HashMap<String,String>();
+     }
+     this.parameters.put(org.apache.hadoop.hive.metastore.utils.StringUtils.intern(key), org.apache.hadoop.hive.metastore.utils.StringUtils.intern(val));
+   }
+ 
+   public Map<String,String> getParameters() {
+     return this.parameters;
+   }
+ 
+   public void setParameters(Map<String,String> parameters) {
+     this.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(parameters);
+   }
+ 
+   public void unsetParameters() {
+     this.parameters = null;
+   }
+ 
+   /** Returns true if field parameters is set (has been assigned a value) and false otherwise */
+   public boolean isSetParameters() {
+     return this.parameters != null;
+   }
+ 
+   public void setParametersIsSet(boolean value) {
+     if (!value) {
+       this.parameters = null;
+     }
+   }
+ 
+   public PrincipalPrivilegeSet getPrivileges() {
+     return this.privileges;
+   }
+ 
+   public void setPrivileges(PrincipalPrivilegeSet privileges) {
+     this.privileges = privileges;
+   }
+ 
+   public void unsetPrivileges() {
+     this.privileges = null;
+   }
+ 
+   /** Returns true if field privileges is set (has been assigned a value) and false otherwise */
+   public boolean isSetPrivileges() {
+     return this.privileges != null;
+   }
+ 
+   public void setPrivilegesIsSet(boolean value) {
+     if (!value) {
+       this.privileges = null;
+     }
+   }
+ 
+   public String getCatName() {
+     return this.catName;
+   }
+ 
+   public void setCatName(String catName) {
+     this.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(catName);
+   }
+ 
+   public void unsetCatName() {
+     this.catName = null;
+   }
+ 
+   /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+   public boolean isSetCatName() {
+     return this.catName != null;
+   }
+ 
+   public void setCatNameIsSet(boolean value) {
+     if (!value) {
+       this.catName = null;
+     }
+   }
+ 
++  public long getWriteId() {
++    return this.writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++    setWriteIdIsSet(true);
++  }
++
++  public void unsetWriteId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
++  public boolean isSetWriteId() {
++    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  public void setWriteIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
++  }
++
++  public boolean isIsStatsCompliant() {
++    return this.isStatsCompliant;
++  }
++
++  public void setIsStatsCompliant(boolean isStatsCompliant) {
++    this.isStatsCompliant = isStatsCompliant;
++    setIsStatsCompliantIsSet(true);
++  }
++
++  public void unsetIsStatsCompliant() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
++  public boolean isSetIsStatsCompliant() {
++    return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  public void setIsStatsCompliantIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case VALUES:
+       if (value == null) {
+         unsetValues();
+       } else {
+         setValues((List<String>)value);
+       }
+       break;
+ 
+     case DB_NAME:
+       if (value == null) {
+         unsetDbName();
+       } else {
+         setDbName((String)value);
+       }
+       break;
+ 
+     case TABLE_NAME:
+       if (value == null) {
+         unsetTableName();
+       } else {
+         setTableName((String)value);
+       }
+       break;
+ 
+     case CREATE_TIME:
+       if (value == null) {
+         unsetCreateTime();
+       } else {
+         setCreateTime((Integer)value);
+       }
+       break;
+ 
+     case LAST_ACCESS_TIME:
+       if (value == null) {
+         unsetLastAccessTime();
+       } else {
+         setLastAccessTime((Integer)value);
+       }
+       break;
+ 
+     case SD:
+       if (value == null) {
+         unsetSd();
+       } else {
+         setSd((StorageDescriptor)value);
+       }
+       break;
+ 
+     case PARAMETERS:
+       if (value == null) {
+         unsetParameters();
+       } else {
+         setParameters((Map<String,String>)value);
+       }
+       break;
+ 
+     case PRIVILEGES:
+       if (value == null) {
+         unsetPrivileges();
+       } else {
+         setPrivileges((PrincipalPrivilegeSet)value);
+       }
+       break;
+ 
+     case CAT_NAME:
+       if (value == null) {
+         unsetCatName();
+       } else {
+         setCatName((String)value);
+       }
+       break;
+ 
++    case WRITE_ID:
++      if (value == null) {
++        unsetWriteId();
++      } else {
++        setWriteId((Long)value);
++      }
++      break;
++
++    case IS_STATS_COMPLIANT:
++      if (value == null) {
++        unsetIsStatsCompliant();
++      } else {
++        setIsStatsCompliant((Boolean)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case VALUES:
+       return getValues();
+ 
+     case DB_NAME:
+       return getDbName();
+ 
+     case TABLE_NAME:
+       return getTableName();
+ 
+     case CREATE_TIME:
+       return getCreateTime();
+ 
+     case LAST_ACCESS_TIME:
+       return getLastAccessTime();
+ 
+     case SD:
+       return getSd();
+ 
+     case PARAMETERS:
+       return getParameters();
+ 
+     case PRIVILEGES:
+       return getPrivileges();
+ 
+     case CAT_NAME:
+       return getCatName();
+ 
++    case WRITE_ID:
++      return getWriteId();
++
++    case IS_STATS_COMPLIANT:
++      return isIsStatsCompliant();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case VALUES:
+       return isSetValues();
+     case DB_NAME:
+       return isSetDbName();
+     case TABLE_NAME:
+       return isSetTableName();
+     case CREATE_TIME:
+       return isSetCreateTime();
+     case LAST_ACCESS_TIME:
+       return isSetLastAccessTime();
+     case SD:
+       return isSetSd();
+     case PARAMETERS:
+       return isSetParameters();
+     case PRIVILEGES:
+       return isSetPrivileges();
+     case CAT_NAME:
+       return isSetCatName();
++    case WRITE_ID:
++      return isSetWriteId();
++    case IS_STATS_COMPLIANT:
++      return isSetIsStatsCompliant();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof Partition)
+       return this.equals((Partition)that);
+     return false;
+   }
+ 
+   public boolean equals(Partition that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_values = true && this.isSetValues();
+     boolean that_present_values = true && that.isSetValues();
+     if (this_present_values || that_present_values) {
+       if (!(this_present_values && that_present_values))
+         return false;
+       if (!this.values.equals(that.values))
+         return false;
+     }
+ 
+     boolean this_present_dbName = true && this.isSetDbName();
+     boolean that_present_dbName = true && that.isSetDbName();
+     if (this_present_dbName || that_present_dbName) {
+       if (!(this_present_dbName && that_present_dbName))
+         return false;
+       if (!this.dbName.equals(that.dbName))
+         return false;
+     }
+ 
+     boolean this_present_tableName = true && this.isSetTableName();
+     boolean that_present_tableName = true && that.isSetTableName();
+     if (this_present_tableName || that_present_tableName) {
+       if (!(this_present_tableName && that_present_tableName))
+         return false;
+       if (!this.tableName.equals(that.tableName))
+         return false;
+     }
+ 
+     boolean this_present_createTime = true;
+     boolean that_present_createTime = true;
+     if (this_present_createTime || that_present_createTime) {
+       if (!(this_present_createTime && that_present_createTime))
+         return false;
+       if (this.createTime != that.createTime)
+         return false;
+     }
+ 
+     boolean this_present_lastAccessTime = true;
+     boolean that_present_lastAccessTime = true;
+     if (this_present_lastAccessTime || that_present_lastAccessTime) {
+       if (!(this_present_lastAccessTime && that_present_lastAccessTime))
+         return false;
+       if (this.lastAccessTime != that.lastAccessTime)
+         return false;
+     }
+ 
+     boolean this_present_sd = true && this.isSetSd();
+     boolean that_present_sd = true && that.isSetSd();
+     if (this_present_sd || that_present_sd) {
+       if (!(this_present_sd && that_present_sd))
+         return false;
+       if (!this.sd.equals(that.sd))
+         return false;
+     }
+ 
+     boolean this_present_parameters = true && this.isSetParameters();
+     boolean that_present_parameters = true && that.isSetParameters();
+     if (this_present_parameters || that_present_parameters) {
+       if (!(this_present_parameters && that_present_parameters))
+         return false;
+       if (!this.parameters.equals(that.parameters))
+         return false;
+     }
+ 
+     boolean this_present_privileges = true && this.isSetPrivileges();
+     boolean that_present_privileges = true && that.isSetPrivileges();
+     if (this_present_privileges || that_present_privileges) {
+       if (!(this_present_privileges && that_present_privileges))
+         return false;
+       if (!this.privileges.equals(that.privileges))
+         return false;
+     }
+ 
+     boolean this_present_catName = true && this.isSetCatName();
+     boolean that_present_catName = true && that.isSetCatName();
+     if (this_present_catName || that_present_catName) {
+       if (!(this_present_catName && that_present_catName))
+         return false;
+       if (!this.catName.equals(that.catName))
+         return false;
+     }
+ 
++    boolean this_present_writeId = true && this.isSetWriteId();
++    boolean that_present_writeId = true && that.isSetWriteId();
++    if (this_present_writeId || that_present_writeId) {
++      if (!(this_present_writeId && that_present_writeId))
++        return false;
++      if (this.writeId != that.writeId)
++        return false;
++    }
++
++    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
++    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
++    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
++      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
++        return false;
++      if (this.isStatsCompliant != that.isStatsCompliant)
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_values = true && (isSetValues());
+     list.add(present_values);
+     if (present_values)
+       list.add(values);
+ 
+     boolean present_dbName = true && (isSetDbName());
+     list.add(present_dbName);
+     if (present_dbName)
+       list.add(dbName);
+ 
+     boolean present_tableName = true && (isSetTableName());
+     list.add(present_tableName);
+     if (present_tableName)
+       list.add(tableName);
+ 
+     boolean present_createTime = true;
+     list.add(present_createTime);
+     if (present_createTime)
+       list.add(createTime);
+ 
+     boolean present_lastAccessTime = true;
+     list.add(present_lastAccessTime);
+     if (present_lastAccessTime)
+       list.add(lastAccessTime);
+ 
+     boolean present_sd = true && (isSetSd());
+     list.add(present_sd);
+     if (present_sd)
+       list.add(sd);
+ 
+     boolean present_parameters = true && (isSetParameters());
+     list.add(present_parameters);
+     if (present_parameters)
+       list.add(parameters);
+ 
+     boolean present_privileges = true && (isSetPrivileges());
+     list.add(present_privileges);
+     if (present_privileges)
+       list.add(privileges);
+ 
+     boolean present_catName = true && (isSetCatName());
+     list.add(present_catName);
+     if (present_catName)
+       list.add(catName);
+ 
++    boolean present_writeId = true && (isSetWriteId());
++    list.add(present_writeId);
++    if (present_writeId)
++      list.add(writeId);
++
++    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
++    list.add(present_isStatsCompliant);
++    if (present_isStatsCompliant)
++      list.add(isStatsCompliant);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(Partition other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetValues()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetDbName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTableName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCreateTime()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetLastAccessTime()).compareTo(other.isSetLastAccessTime());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetLastAccessTime()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lastAccessTime, other.lastAccessTime);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetSd()).compareTo(other.isSetSd());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetSd()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sd, other.sd);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetParameters()).compareTo(other.isSetParameters());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetParameters()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parameters, other.parameters);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetPrivileges()).compareTo(other.isSetPrivileges());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetPrivileges()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privileges, other.privileges);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCatName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetWriteId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIsStatsCompliant()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("Partition(");
+     boolean first = true;
+ 
+     sb.append("values:");
+     if (this.values == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.values);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("dbName:");
+     if (this.dbName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.dbName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("tableName:");
+     if (this.tableName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.tableName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("createTime:");
+     sb.append(this.createTime);
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("lastAccessTime:");
+     sb.append(this.lastAccessTime);
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("sd:");
+     if (this.sd == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.sd);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("parameters:");
+     if (this.parameters == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.parameters);
+     }
+     first = false;
+     if (isSetPrivileges()) {
+       if (!first) sb.append(", ");
+       sb.append("privileges:");
+       if (this.privileges == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.privileges);
+       }
+       first = false;
+     }
+     if (isSetCatName()) {
+       if (!first) sb.append(", ");
+       sb.append("catName:");
+       if (this.catName == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.catName);
+       }
+       first = false;
+     }
++    if (isSetWriteId()) {
++      if (!first) sb.append(", ");
++      sb.append("writeId:");
++      sb.append(this.writeId);
++      first = false;
++    }
++    if (isSetIsStatsCompliant()) {
++      if (!first) sb.append(", ");
++      sb.append("isStatsCompliant:");
++      sb.append(this.isStatsCompliant);
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     // check for sub-struct validity
+     if (sd != null) {
+       sd.validate();
+     }
+     if (privileges != null) {
+       privileges.validate();
+     }
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
+       // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+       __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class PartitionStandardSchemeFactory implements SchemeFactory {
+     public PartitionStandardScheme getScheme() {
+       return new PartitionStandardScheme();
+     }
+   }
+ 
+   private static class PartitionStandardScheme extends StandardScheme<Partition> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // VALUES
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list216 = iprot.readListBegin();
+                 struct.values = new ArrayList<String>(_list216.size);
+                 String _elem217;
+                 for (int _i218 = 0; _i218 < _list216.size; ++_i218)
+                 {
+                   _elem217 = iprot.readString();
+                   struct.values.add(_elem217);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setValuesIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // DB_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+               struct.setDbNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 3: // TABLE_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+               struct.setTableNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 4: // CREATE_TIME
+             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+               struct.createTime = iprot.readI32();
+               struct.setCreateTimeIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 5: // LAST_ACCESS_TIME
+             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+               struct.lastAccessTime = iprot.readI32();
+               struct.setLastAccessTimeIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 6: // SD
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.sd = new StorageDescriptor();
+               struct.sd.read(iprot);
+               struct.setSdIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 7: // PARAMETERS
+             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+               {
+                 org.apache.thrift.protocol.TMap _map219 = iprot.readMapBegin();
+                 struct.parameters = new HashMap<String,String>(2*_map219.size);
+                 String _key220;
+                 String _val221;
+                 for (int _i222 = 0; _i222 < _map219.size; ++_i222)
+                 {
+                   _key220 = iprot.readString();
+                   _val221 = iprot.readString();
+                   struct.parameters.put(_key220, _val221);
+                 }
+                 iprot.readMapEnd();
+               }
+               struct.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(struct.parameters); struct.setParametersIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 8: // PRIVILEGES
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.privileges = new PrincipalPrivilegeSet();
+               struct.privileges.read(iprot);
+               struct.setPrivilegesIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 9: // CAT_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+               struct.setCatNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 10: // WRITE_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.writeId = iprot.readI64();
++              struct.setWriteIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 11: // IS_STATS_COMPLIANT
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.isStatsCompliant = iprot.readBool();
++              struct.setIsStatsCompliantIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.values != null) {
+         oprot.writeFieldBegin(VALUES_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size()));
+           for (String _iter223 : struct.values)
+           {
+             oprot.writeString(_iter223);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       if (struct.dbName != null) {
+         oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+         oprot.writeString(struct.dbName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.tableName != null) {
+         oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+         oprot.writeString(struct.tableName);
+         oprot.writeFieldEnd();
+       }
+       oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
+       oprot.writeI32(struct.createTime);
+       oprot.writeFieldEnd();
+       oprot.writeFieldBegin(LAST_ACCESS_TIME_FIELD_DESC);
+       oprot.writeI32(struct.lastAccessTime);
+       oprot.writeFieldEnd();
+       if (struct.sd != null) {
+         oprot.writeFieldBegin(SD_FIELD_DESC);
+         struct.sd.write(oprot);
+         oprot.writeFieldEnd();
+       }
+       if (struct.parameters != null) {
+         oprot.writeFieldBegin(PARAMETERS_FIELD_DESC);
+         {
+           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size()));
+           for (Map.Entry<String, String> _iter224 : struct.parameters.entrySet())
+           {
+             oprot.writeString(_iter224.getKey());
+             oprot.writeString(_iter224.getValue());
+           }
+           oprot.writeMapEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       if (struct.privileges != null) {
+         if (struct.isSetPrivileges()) {
+           oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC);
+           struct.privileges.write(oprot);
+           oprot.writeFieldEnd();
+         }
+       }
+       if (struct.catName != null) {
+         if (struct.isSetCatName()) {
+           oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+           oprot.writeString(struct.catName);
+           oprot.writeFieldEnd();
+         }
+       }
++      if (struct.isSetWriteId()) {
++        oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
++        oprot.writeI64(struct.writeId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
++        oprot.writeBool(struct.isStatsCompliant);
++        oprot.writeFieldEnd();
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class PartitionTupleSchemeFactory implements SchemeFactory {
+     public PartitionTupleScheme getScheme() {
+       return new PartitionTupleScheme();
+     }
+   }
+ 
+   private static class PartitionTupleScheme extends TupleScheme<Partition> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       BitSet optionals = new BitSet();
+       if (struct.isSetValues()) {
+         optionals.set(0);
+       }
+       if (struct.isSetDbName()) {
+         optionals.set(1);
+       }
+       if (struct.isSetTableName()) {
+         optionals.set(2);
+       }
+       if (struct.isSetCreateTime()) {
+         optionals.set(3);
+       }
+       if (struct.isSetLastAccessTime()) {
+         optionals.set(4);
+       }
+       if (struct.isSetSd()) {
+         optionals.set(5);
+       }
+       if (struct.isSetParameters()) {
+         optionals.set(6);
+       }
+       if (struct.isSetPrivileges()) {
+         optionals.set(7);
+       }
+       if (struct.isSetCatName()) {
+         optionals.set(8);
+       }
 -      oprot.writeBitSet(optionals, 9);
++      if (struct.isSetWriteId()) {
++        optionals.set(9);
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        optionals.set(10);
++      }
++      oprot.writeBitSet(optionals, 11);
+       if (struct.isSetValues()) {
+         {
+           oprot.writeI32(struct.values.size());
+           for (String _iter225 : struct.values)
+           {
+             oprot.writeString(_iter225);
+           }
+         }
+       }
+       if (struct.isSetDbName()) {
+         oprot.writeString(struct.dbName);
+       }
+       if (struct.isSetTableName()) {
+         oprot.writeString(struct.tableName);
+       }
+       if (struct.isSetCreateTime()) {
+         oprot.writeI32(struct.createTime);
+       }
+       if (struct.isSetLastAccessTime()) {
+         oprot.writeI32(struct.lastAccessTime);
+       }
+       if (struct.isSetSd()) {
+         struct.sd.write(oprot);
+       }
+       if (struct.isSetParameters()) {
+         {
+           oprot.writeI32(struct.parameters.size());
+           for (Map.Entry<String, String> _iter226 : struct.parameters.entrySet())
+           {
+             oprot.writeString(_iter226.getKey());
+             oprot.writeString(_iter226.getValue());
+           }
+         }
+       }
+       if (struct.isSetPrivileges()) {
+         struct.privileges.write(oprot);
+       }
+       if (struct.isSetCatName()) {
+         oprot.writeString(struct.catName);
+       }
++      if (struct.isSetWriteId()) {
++        oprot.writeI64(struct.writeId);
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeBool(struct.isStatsCompliant);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
 -      BitSet incoming = iprot.readBitSet(9);
++      BitSet incoming = iprot.readBitSet(11);
+       if (incoming.get(0)) {
+         {
+           org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+           struct.values = new ArrayList<String>(_list227.size);
+           String _elem228;
+           for (int _i229 = 0; _i229 < _list227.size; ++_i229)
+           {
+             _elem228 = iprot.readString();
+             struct.values.add(_elem228);
+           }
+         }
+         struct.setValuesIsSet(true);
+       }
+       if (incoming.get(1)) {
+         struct.dbName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+         struct.setDbNameIsSet(true);
+       }
+       if (incoming.get(2)) {
+         struct.tableName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+         struct.setTableNameIsSet(true);
+       }
+       if (incoming.get(3)) {
+         struct.createTime = iprot.readI32();
+         struct.setCreateTimeIsSet(true);
+       }
+       if (incoming.get(4)) {
+         struct.lastAccessTime = iprot.readI32();
+         struct.setLastAccessTimeIsSet(true);
+       }
+       if (incoming.get(5)) {
+         struct.sd = new StorageDescriptor();
+         struct.sd.read(iprot);
+         struct.setSdIsSet(true);
+       }
+       if (incoming.get(6)) {
+         {
+           org.apache.thrift.protocol.TMap _map230 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+           struct.parameters = new HashMap<String,String>(2*_map230.size);
+           String _key231;
+           String _val232;
+           for (int _i233 = 0; _i233 < _map230.size; ++_i233)
+           {
+             _key231 = iprot.readString();
+             _val232 = iprot.readString();
+             struct.parameters.put(_key231, _val232);
+           }
+         }
+         struct.parameters = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(struct.parameters); struct.setParametersIsSet(true);
+       }
+       if (incoming.get(7)) {
+         struct.privileges = new PrincipalPrivilegeSet();
+         struct.privileges.read(iprot);
+         struct.setPrivilegesIsSet(true);
+       }
+       if (incoming.get(8)) {
+         struct.catName = org.apache.hadoop.hive.metastore.utils.StringUtils.intern(iprot.readString());
+         struct.setCatNameIsSet(true);
+       }
++      if (incoming.get(9)) {
++        struct.writeId = iprot.readI64();
++        struct.setWriteIdIsSet(true);
++      }
++      if (incoming.get(10)) {
++        struct.isStatsCompliant = iprot.readBool();
++        struct.setIsStatsCompliantIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
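
The net effect of this hunk on the Partition wire format: two new optional fields, writeId (field 10, i64) and isStatsCompliant (field 11, bool), plus a TupleScheme bitset widened from 9 to 11 entries, which is why tuple-protocol writers and readers must be upgraded together. Below is a minimal round-trip sketch, assuming the generated accessors implied by this diff (setWriteId, getWriteId, isIsStatsCompliant) and the stock libthrift 0.9.3 TSerializer/TDeserializer API; all values are hypothetical.

    import org.apache.hadoop.hive.metastore.api.Partition;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TException;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class PartitionRoundTrip {
      public static void main(String[] args) throws TException {
        Partition p = new Partition();
        p.setDbName("default");          // hypothetical values
        p.setTableName("t");
        p.setWriteId(42L);               // new optional field 10
        p.setIsStatsCompliant(true);     // new optional field 11

        // Compact protocol exercises the StandardScheme cases 10/11 added above.
        byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(p);

        Partition copy = new Partition();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
        // Optional fields that were set survive the round trip; unset ones stay unset.
        assert copy.isSetWriteId() && copy.getWriteId() == 42L;
        assert copy.isSetIsStatsCompliant() && copy.isIsStatsCompliant();
      }
    }
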


[55/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
index 0000000,53c4d24..6ef416f
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClientPreCatalog.java
@@@ -1,0 -1,3427 +1,3535 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_DATABASE_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
++import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.prependCatalogToDbName;
+ 
+ import java.io.IOException;
+ import java.lang.reflect.Constructor;
+ import java.lang.reflect.InvocationHandler;
+ import java.lang.reflect.InvocationTargetException;
+ import java.lang.reflect.Method;
+ import java.lang.reflect.Proxy;
+ import java.net.InetAddress;
+ import java.net.URI;
+ import java.net.UnknownHostException;
+ import java.nio.ByteBuffer;
+ import java.security.PrivilegedExceptionAction;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.Collection;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.LinkedHashMap;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ import java.util.NoSuchElementException;
+ import java.util.Random;
+ import java.util.concurrent.TimeUnit;
+ import java.util.concurrent.atomic.AtomicInteger;
+ 
+ import javax.security.auth.login.LoginException;
+ 
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.hooks.URIResolverHook;
+ import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
+ import org.apache.hadoop.hive.metastore.security.HadoopThriftAuthBridge;
+ import org.apache.hadoop.hive.metastore.txn.TxnUtils;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.apache.hadoop.hive.metastore.utils.ObjectPair;
+ import org.apache.hadoop.hive.metastore.utils.SecurityUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ReflectionUtils;
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.thrift.TApplicationException;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.protocol.TBinaryProtocol;
+ import org.apache.thrift.protocol.TCompactProtocol;
+ import org.apache.thrift.protocol.TProtocol;
+ import org.apache.thrift.transport.TFramedTransport;
+ import org.apache.thrift.transport.TSocket;
+ import org.apache.thrift.transport.TTransport;
+ import org.apache.thrift.transport.TTransportException;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.collect.Lists;
+ 
+ /**
+  * Hive Metastore Client.
+  * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient
+  * are not public and can change. Hence this class is marked as unstable.
+  * Users who require a retry mechanism when the connection between the metastore and the
+  * client is broken should use the RetryingMetaStoreClient class instead.
+  */
+ @InterfaceAudience.Public
+ @InterfaceStability.Evolving
+ public class HiveMetaStoreClientPreCatalog implements IMetaStoreClient, AutoCloseable {
+   /**
+    * Capabilities of the current client. If this client talks to a MetaStore server in a manner
+    * that implies the use of expanded features requiring client-side support that this client
+    * doesn't have (e.g. getting a table of a new type), it will get back failures when
+    * capability checking is enabled (the default).
+    */
+   public final static ClientCapabilities VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES));
+   // Client capabilities used only in tests.
+   public final static ClientCapabilities TEST_VERSION = new ClientCapabilities(
+       Lists.newArrayList(ClientCapability.INSERT_ONLY_TABLES, ClientCapability.TEST_CAPABILITY));
+ 
+   ThriftHiveMetastore.Iface client = null;
+   private TTransport transport = null;
+   private boolean isConnected = false;
+   private URI[] metastoreUris;
+   private final HiveMetaHookLoader hookLoader;
+   protected final Configuration conf;  // Keep a copy of the conf; if the session conf changes, a new HMS client may be needed.
+   protected boolean fastpath = false;
+   private String tokenStrForm;
+   private final boolean localMetaStore;
+   private final MetaStoreFilterHook filterHook;
+   private final URIResolverHook uriResolverHook;
+   private final int fileMetadataBatchSize;
+ 
+   private Map<String, String> currentMetaVars;
+ 
+   private static final AtomicInteger connCount = new AtomicInteger(0);
+ 
+   // for thrift connection retries
+   private int retries = 5;
+   private long retryDelaySeconds = 0;
+   private final ClientCapabilities version;
+ 
+   static final protected Logger LOG = LoggerFactory.getLogger(HiveMetaStoreClientPreCatalog.class);
+ 
+   public HiveMetaStoreClientPreCatalog(Configuration conf) throws MetaException {
+     this(conf, null, true);
+   }
+ 
+   public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader) throws MetaException {
+     this(conf, hookLoader, true);
+   }
+ 
+   public HiveMetaStoreClientPreCatalog(Configuration conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded)
+     throws MetaException {
+ 
+     this.hookLoader = hookLoader;
+     if (conf == null) {
+       conf = MetastoreConf.newMetastoreConf();
+       this.conf = conf;
+     } else {
+       this.conf = new Configuration(conf);
+     }
+     version = MetastoreConf.getBoolVar(conf, ConfVars.HIVE_IN_TEST) ? TEST_VERSION : VERSION;
+     filterHook = loadFilterHooks();
+     uriResolverHook = loadUriResolverHook();
+     fileMetadataBatchSize = MetastoreConf.getIntVar(
+         conf, ConfVars.BATCH_RETRIEVE_OBJECTS_MAX);
+ 
+     String msUri = MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS);
+     localMetaStore = MetastoreConf.isEmbeddedMetaStore(msUri);
+     if (localMetaStore) {
+       if (!allowEmbedded) {
+         throw new MetaException("Embedded metastore is not allowed here. Please configure "
+             + ConfVars.THRIFT_URIS.toString() + "; it is currently set to [" + msUri + "]");
+       }
+       // instantiate the metastore server handler directly instead of connecting
+       // through the network
+       client = HiveMetaStore.newRetryingHMSHandler("hive client", this.conf, true);
+       // Initialize materializations invalidation cache (only for local metastore)
+       MaterializationsInvalidationCache.get().init(conf, (IHMSHandler) client);
+       isConnected = true;
+       snapshotActiveConf();
+       return;
+     }
+ 
+     // get the number retries
+     retries = MetastoreConf.getIntVar(conf, ConfVars.THRIFT_CONNECTION_RETRIES);
+     retryDelaySeconds = MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS);
+ 
+     // Thrift URIs are configured; resolve them
+     if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URIS) != null) {
+       resolveUris();
+     } else {
+       LOG.error("NOT getting uris from conf");
+       throw new MetaException("MetaStoreURIs not found in conf file");
+     }
+ 
+     //If HADOOP_PROXY_USER is set in env or property,
+     //then need to create metastore client that proxies as that user.
+     String HADOOP_PROXY_USER = "HADOOP_PROXY_USER";
+     String proxyUser = System.getenv(HADOOP_PROXY_USER);
+     if (proxyUser == null) {
+       proxyUser = System.getProperty(HADOOP_PROXY_USER);
+     }
+     //if HADOOP_PROXY_USER is set, create DelegationToken using real user
+     if(proxyUser != null) {
+       LOG.info(HADOOP_PROXY_USER + " is set. Using delegation "
+           + "token for HiveMetaStore connection.");
+       try {
+         UserGroupInformation.getLoginUser().getRealUser().doAs(
+             new PrivilegedExceptionAction<Void>() {
+               @Override
+               public Void run() throws Exception {
+                 open();
+                 return null;
+               }
+             });
+         String delegationTokenPropString = "DelegationTokenForHiveMetaStoreServer";
+         String delegationTokenStr = getDelegationToken(proxyUser, proxyUser);
+         SecurityUtils.setTokenStr(UserGroupInformation.getCurrentUser(), delegationTokenStr,
+             delegationTokenPropString);
+         MetastoreConf.setVar(this.conf, ConfVars.TOKEN_SIGNATURE, delegationTokenPropString);
+         close();
+       } catch (Exception e) {
+         LOG.error("Error while setting delegation token for " + proxyUser, e);
+         if(e instanceof MetaException) {
+           throw (MetaException)e;
+         } else {
+           throw new MetaException(e.getMessage());
+         }
+       }
+     }
+     // finally open the store
+     open();
+   }
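
As the comments above note, the constructor checks HADOOP_PROXY_USER in both the environment and the system properties; when set, it opens a short-lived connection as the real (login) user only to fetch a delegation token for the proxy user, then reconnects with that token. A minimal sketch of the system-property path follows; the proxy user name is hypothetical, and a Kerberos login for the real user is assumed to already be in place.

    static IMetaStoreClient openAsProxy() throws MetaException {
      // Hypothetical proxy user; picked up by the System.getProperty branch above.
      System.setProperty("HADOOP_PROXY_USER", "etl_user");
      Configuration conf = MetastoreConf.newMetastoreConf();
      // conf must carry THRIFT_URIS: the proxy-user branch is skipped for an
      // embedded (local) metastore.
      return new HiveMetaStoreClientPreCatalog(conf);
    }
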
+ 
+   private void resolveUris() throws MetaException {
+     String[] metastoreUrisString = MetastoreConf.getVar(conf,
+             ConfVars.THRIFT_URIS).split(",");
+ 
+     List<URI> metastoreURIArray = new ArrayList<URI>();
+     try {
+       for (String s : metastoreUrisString) {
+         URI tmpUri = new URI(s);
+         if (tmpUri.getScheme() == null) {
+           throw new IllegalArgumentException("URI: " + s
+                   + " does not have a scheme");
+         }
+         if (uriResolverHook != null) {
+           metastoreURIArray.addAll(uriResolverHook.resolveURI(tmpUri));
+         } else {
+           metastoreURIArray.add(new URI(
+                   tmpUri.getScheme(),
+                   tmpUri.getUserInfo(),
+                   HadoopThriftAuthBridge.getBridge().getCanonicalHostName(tmpUri.getHost()),
+                   tmpUri.getPort(),
+                   tmpUri.getPath(),
+                   tmpUri.getQuery(),
+                   tmpUri.getFragment()
+           ));
+         }
+       }
+       metastoreUris = metastoreURIArray.toArray(new URI[0]);
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         List<URI> uriList = Arrays.asList(metastoreUris);
+         Collections.shuffle(uriList);
+         // toArray(new URI[0]) avoids the unchecked Object[]-to-URI[] cast.
+         metastoreUris = uriList.toArray(new URI[0]);
+       }
+     } catch (IllegalArgumentException e) {
+       throw (e);
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+   }
+ 
+ 
+   private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException {
+     Class<? extends MetaStoreFilterHook> authProviderClass = MetastoreConf.
+         getClass(conf, ConfVars.FILTER_HOOK, DefaultMetaStoreFilterHookImpl.class,
+             MetaStoreFilterHook.class);
+     String msg = "Unable to create instance of " + authProviderClass.getName() + ": ";
+     try {
+       Constructor<? extends MetaStoreFilterHook> constructor =
+           authProviderClass.getConstructor(Configuration.class);
+       return constructor.newInstance(conf);
+     } catch (NoSuchMethodException | SecurityException | IllegalAccessException | InstantiationException | IllegalArgumentException | InvocationTargetException e) {
+       throw new IllegalStateException(msg + e.getMessage(), e);
+     }
+   }
+ 
+   // Multiple clients may initialize the hook at the same time.
+   synchronized private URIResolverHook loadUriResolverHook() throws IllegalStateException {
+ 
+     String uriResolverClassName =
+             MetastoreConf.getAsString(conf, ConfVars.URI_RESOLVER);
+     if (uriResolverClassName.equals("")) {
+       return null;
+     } else {
+       LOG.info("Loading uri resolver" + uriResolverClassName);
+       try {
+         Class<?> uriResolverClass = Class.forName(uriResolverClassName, true,
+                 JavaUtils.getClassLoader());
+         return (URIResolverHook) ReflectionUtils.newInstance(uriResolverClass, null);
+       } catch (Exception e) {
+         LOG.error("Exception loading uri resolver hook" + e);
+         return null;
+       }
+     }
+   }
+ 
+   /**
+    * Swaps the first element of the metastoreUris array with a random element from the
+    * remainder of the array.
+    */
+   private void promoteRandomMetaStoreURI() {
+     if (metastoreUris.length <= 1) {
+       return;
+     }
+     Random rng = new Random();
+     int index = rng.nextInt(metastoreUris.length - 1) + 1;
+     URI tmp = metastoreUris[0];
+     metastoreUris[0] = metastoreUris[index];
+     metastoreUris[index] = tmp;
+   }
+ 
+   @VisibleForTesting
+   public TTransport getTTransport() {
+     return transport;
+   }
+ 
+   @Override
+   public boolean isLocalMetaStore() {
+     return localMetaStore;
+   }
+ 
+   @Override
+   public boolean isCompatibleWith(Configuration conf) {
+     // Make a copy of currentMetaVars; there is a race condition in which
+     // currentMetaVars might be changed during the execution of this method.
+     Map<String, String> currentMetaVarsCopy = currentMetaVars;
+     if (currentMetaVarsCopy == null) {
+       return false; // recreate
+     }
+     boolean compatible = true;
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       // Since metaVars are all of different types, use string for comparison
+       String oldVar = currentMetaVarsCopy.get(oneVar.getVarname());
+       String newVar = MetastoreConf.getAsString(conf, oneVar);
+       if (oldVar == null ||
+           (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) {
+         LOG.info("Mestastore configuration " + oneVar.toString() +
+             " changed from " + oldVar + " to " + newVar);
+         compatible = false;
+       }
+     }
+     return compatible;
+   }
+ 
+   @Override
+   public void setHiveAddedJars(String addedJars) {
+     MetastoreConf.setVar(conf, ConfVars.ADDED_JARS, addedJars);
+   }
+ 
+   @Override
+   public void reconnect() throws MetaException {
+     if (localMetaStore) {
+       // For direct DB connections we don't yet support reestablishing connections.
+       throw new MetaException("For direct MetaStore DB connections, we don't support retries" +
+           " at the client level.");
+     } else {
+       close();
+ 
+       if (uriResolverHook != null) {
+         //for dynamic uris, re-lookup if there are new metastore locations
+         resolveUris();
+       }
+ 
+       if (MetastoreConf.getVar(conf, ConfVars.THRIFT_URI_SELECTION).equalsIgnoreCase("RANDOM")) {
+         // Swap the first element of the metastoreUris[] with a random element from the rest
+         // of the array. Rationale being that this method will generally be called when the default
+         // connection has died and the default connection is likely to be the first array element.
+         promoteRandomMetaStoreURI();
+       }
+       open();
+     }
+   }
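
The RANDOM policy mentioned in the comment is purely configuration-driven: resolveUris() shuffles the URI list once at startup, and reconnect() then promotes a random URI to the front before reopening. A minimal sketch, with hypothetical host names:

    Configuration conf = MetastoreConf.newMetastoreConf();
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URIS,
        "thrift://hms-1.example.com:9083,thrift://hms-2.example.com:9083");
    MetastoreConf.setVar(conf, ConfVars.THRIFT_URI_SELECTION, "RANDOM");
    HiveMetaStoreClientPreCatalog client = new HiveMetaStoreClientPreCatalog(conf);
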
+ 
+   /**
+    * @param dbname
+    * @param tbl_name
+    * @param new_tbl
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see
+    *   org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table(
+    *   java.lang.String, java.lang.String,
+    *   org.apache.hadoop.hive.metastore.api.Table)
+    */
+   @Override
+   public void alter_table(String dbname, String tbl_name, Table new_tbl)
+       throws InvalidOperationException, MetaException, TException {
+     alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null);
+   }
+ 
+   @Override
+   public void alter_table(String defaultDatabaseName, String tblName, Table table,
+       boolean cascade) throws InvalidOperationException, MetaException, TException {
+     EnvironmentContext environmentContext = new EnvironmentContext();
+     if (cascade) {
+       environmentContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+     }
+     alter_table_with_environmentContext(defaultDatabaseName, tblName, table, environmentContext);
+   }
+ 
+   @Override
+   public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl,
+       EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException {
+     client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext);
+   }
+ 
+   /**
+    * @param dbname
+    * @param name
+    * @param part_vals
+    * @param newPart
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition(
+    *      java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition)
+    */
+   @Override
+   public void renamePartition(final String dbname, final String name, final List<String> part_vals, final Partition newPart)
+       throws InvalidOperationException, MetaException, TException {
+     client.rename_partition(dbname, name, part_vals, newPart);
+   }
+ 
+   private void open() throws MetaException {
+     isConnected = false;
+     TTransportException tte = null;
+     boolean useSSL = MetastoreConf.getBoolVar(conf, ConfVars.USE_SSL);
+     boolean useSasl = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_SASL);
+     boolean useFramedTransport = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_FRAMED_TRANSPORT);
+     boolean useCompactProtocol = MetastoreConf.getBoolVar(conf, ConfVars.USE_THRIFT_COMPACT_PROTOCOL);
+     int clientSocketTimeout = (int) MetastoreConf.getTimeVar(conf,
+         ConfVars.CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS);
+ 
+     for (int attempt = 0; !isConnected && attempt < retries; ++attempt) {
+       for (URI store : metastoreUris) {
+         LOG.info("Trying to connect to metastore with URI " + store);
+ 
+         try {
+           if (useSSL) {
+             try {
+               String trustStorePath = MetastoreConf.getVar(conf, ConfVars.SSL_TRUSTSTORE_PATH).trim();
+               if (trustStorePath.isEmpty()) {
+                 throw new IllegalArgumentException(ConfVars.SSL_TRUSTSTORE_PATH.toString()
+                     + " Not configured for SSL connection");
+               }
+               String trustStorePassword =
+                   MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.SSL_TRUSTSTORE_PASSWORD);
+ 
+               // Create an SSL socket and connect
+               transport = SecurityUtils.getSSLSocket(store.getHost(), store.getPort(), clientSocketTimeout,
+                   trustStorePath, trustStorePassword );
+               LOG.info("Opened an SSL connection to metastore, current connections: " + connCount.incrementAndGet());
+             } catch(IOException e) {
+               throw new IllegalArgumentException(e);
+             } catch(TTransportException e) {
+               tte = e;
+               throw new MetaException(e.toString());
+             }
+           } else {
+             transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout);
+           }
+ 
+           if (useSasl) {
+             // Wrap thrift connection with SASL for secure connection.
+             try {
+               HadoopThriftAuthBridge.Client authBridge =
+                 HadoopThriftAuthBridge.getBridge().createClient();
+ 
+               // check if we should use delegation tokens to authenticate
+               // the call below gets hold of the tokens if they are set up by hadoop
+               // this should happen on the map/reduce tasks if the client added the
+               // tokens into hadoop's credential store in the front end during job
+               // submission.
+               String tokenSig = MetastoreConf.getVar(conf, ConfVars.TOKEN_SIGNATURE);
+               // tokenSig could be null
+               tokenStrForm = SecurityUtils.getTokenStrForm(tokenSig);
+ 
+               if(tokenStrForm != null) {
+                 LOG.info("HMSC::open(): Found delegation token. Creating DIGEST-based thrift connection.");
+                 // authenticate using delegation tokens via the "DIGEST" mechanism
+                 transport = authBridge.createClientTransport(null, store.getHost(),
+                     "DIGEST", tokenStrForm, transport,
+                         MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               } else {
+                 LOG.info("HMSC::open(): Could not find delegation token. Creating KERBEROS-based thrift connection.");
+                 String principalConfig =
+                     MetastoreConf.getVar(conf, ConfVars.KERBEROS_PRINCIPAL);
+                 transport = authBridge.createClientTransport(
+                     principalConfig, store.getHost(), "KERBEROS", null,
+                     transport, MetaStoreUtils.getMetaStoreSaslProperties(conf, useSSL));
+               }
+             } catch (IOException ioe) {
+               LOG.error("Couldn't create client transport", ioe);
+               throw new MetaException(ioe.toString());
+             }
+           } else {
+             if (useFramedTransport) {
+               transport = new TFramedTransport(transport);
+             }
+           }
+ 
+           final TProtocol protocol;
+           if (useCompactProtocol) {
+             protocol = new TCompactProtocol(transport);
+           } else {
+             protocol = new TBinaryProtocol(transport);
+           }
+           client = new ThriftHiveMetastore.Client(protocol);
+           try {
+             if (!transport.isOpen()) {
+               transport.open();
+               LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet());
+             }
+             isConnected = true;
+           } catch (TTransportException e) {
+             tte = e;
+             if (LOG.isDebugEnabled()) {
+               LOG.warn("Failed to connect to the MetaStore Server...", e);
+             } else {
+               // Don't print full exception trace if DEBUG is not on.
+               LOG.warn("Failed to connect to the MetaStore Server...");
+             }
+           }
+ 
+           if (isConnected && !useSasl && MetastoreConf.getBoolVar(conf, ConfVars.EXECUTE_SET_UGI)){
+             // Call set_ugi, only in insecure mode.
+             try {
+               UserGroupInformation ugi = SecurityUtils.getUGI();
+               client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames()));
+             } catch (LoginException e) {
+               LOG.warn("Failed to do login. set_ugi() is not successful, " +
+                        "Continuing without it.", e);
+             } catch (IOException e) {
+               LOG.warn("Failed to find ugi of client set_ugi() is not successful, " +
+                   "Continuing without it.", e);
+             } catch (TException e) {
+               LOG.warn("set_ugi() not successful, Likely cause: new client talking to old server. "
+                   + "Continuing without it.", e);
+             }
+           }
+         } catch (MetaException e) {
+           LOG.error("Unable to connect to metastore with URI " + store
+                     + " in attempt " + attempt, e);
+         }
+         if (isConnected) {
+           break;
+         }
+       }
+       // Wait before launching the next round of connection retries.
+       if (!isConnected && retryDelaySeconds > 0) {
+         try {
+           LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt.");
+           Thread.sleep(retryDelaySeconds * 1000);
+         } catch (InterruptedException ignore) {}
+       }
+     }
+ 
+     if (!isConnected) {
+       throw new MetaException("Could not connect to meta store using any of the URIs provided." +
+         " Most recent failure: " + StringUtils.stringifyException(tte));
+     }
+ 
+     snapshotActiveConf();
+ 
+     LOG.info("Connected to metastore.");
+   }
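
Everything open() does is driven by the ConfVars read at its top; the SSL branch, for example, needs only the trust store settings in addition to USE_SSL. A minimal sketch follows; the path and password are hypothetical, and in practice MetastoreConf.getPassword can also resolve the password from a Hadoop credential provider instead of plain configuration.

    Configuration conf = MetastoreConf.newMetastoreConf();
    conf.setBoolean(ConfVars.USE_SSL.getVarname(), true);
    MetastoreConf.setVar(conf, ConfVars.SSL_TRUSTSTORE_PATH,
        "/etc/hive/conf/truststore.jks");
    conf.set(ConfVars.SSL_TRUSTSTORE_PASSWORD.getVarname(), "changeit");
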
+ 
+   private void snapshotActiveConf() {
+     currentMetaVars = new HashMap<>(MetastoreConf.metaVars.length);
+     for (ConfVars oneVar : MetastoreConf.metaVars) {
+       currentMetaVars.put(oneVar.getVarname(), MetastoreConf.getAsString(conf, oneVar));
+     }
+   }
+ 
+   @Override
+   public String getTokenStrForm() throws IOException {
+     return tokenStrForm;
+   }
+ 
+   @Override
+   public void close() {
+     isConnected = false;
+     currentMetaVars = null;
+     try {
+       if (null != client) {
+         client.shutdown();
+       }
+     } catch (TException e) {
+       LOG.debug("Unable to shutdown metastore client. Will try closing transport directly.", e);
+     }
+     // The transport would have been closed via client.shutdown(), so this should not
+     // be needed, but we make the call just in case.
+     if ((transport != null) && transport.isOpen()) {
+       transport.close();
+       LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet());
+     }
+   }
+ 
+   @Override
+   public void setMetaConf(String key, String value) throws TException {
+     client.setMetaConf(key, value);
+   }
+ 
+   @Override
+   public String getMetaConf(String key) throws TException {
+     return client.getMetaConf(key);
+   }
+ 
+   /**
+    * @param new_part
+    * @return the added partition
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition)
+    */
+   @Override
+   public Partition add_partition(Partition new_part) throws TException {
+     return add_partition(new_part, null);
+   }
+ 
+   public Partition add_partition(Partition new_part, EnvironmentContext envContext)
+       throws TException {
+     Partition p = client.add_partition_with_environment_context(new_part, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   /**
+    * @param new_parts
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List)
+    */
+   @Override
+   public int add_partitions(List<Partition> new_parts) throws TException {
+     return client.add_partitions(new_parts);
+   }
+ 
+   @Override
+   public List<Partition> add_partitions(
+       List<Partition> parts, boolean ifNotExists, boolean needResults) throws TException {
+     if (parts.isEmpty()) {
+       return needResults ? new ArrayList<>() : null;
+     }
+     Partition part = parts.get(0);
+     AddPartitionsRequest req = new AddPartitionsRequest(
+         part.getDbName(), part.getTableName(), parts, ifNotExists);
+     req.setNeedResult(needResults);
+     AddPartitionsResult result = client.add_partitions_req(req);
+     return needResults ? filterHook.filterPartitions(result.getPartitions()) : null;
+   }
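
A short usage sketch of the batched form above; the partitions are assumed to belong to one db/table, since the request is keyed off parts.get(0).

    // ifNotExists=true: silently skip partitions that already exist;
    // needResults=true: have the server return the created Partition objects,
    // which are then filtered through the client-side MetaStoreFilterHook.
    List<Partition> created = client.add_partitions(newParts, true, true);
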
+ 
+   @Override
+   public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException {
+     return client.add_partitions_pspec(partitionSpec.toPartitionSpec());
+   }
+ 
+   /**
+    * @param table_name
+    * @param db_name
+    * @param part_vals
+    * @return the appended partition
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String,
+    *      java.lang.String, java.util.List)
+    */
+   @Override
+   public Partition appendPartition(String db_name, String table_name,
+       List<String> part_vals) throws TException {
+     return appendPartition(db_name, table_name, part_vals, null);
+   }
+ 
+   public Partition appendPartition(String db_name, String table_name, List<String> part_vals,
+       EnvironmentContext envContext) throws TException {
+     Partition p = client.append_partition_with_environment_context(db_name, table_name,
+         part_vals, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   @Override
+   public Partition appendPartition(String dbName, String tableName, String partName)
+       throws TException {
+     return appendPartition(dbName, tableName, partName, (EnvironmentContext)null);
+   }
+ 
+   public Partition appendPartition(String dbName, String tableName, String partName,
+       EnvironmentContext envContext) throws TException {
+     Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+         partName, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   /**
+    * Exchange a partition between two tables.
+    * @param partitionSpecs partition spec (partition column name to value) of the partition to be exchanged
+    * @param sourceDb the db of the source table
+    * @param sourceTable the source table name
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return the new partition after the exchange
+    */
+   @Override
+   public Partition exchange_partition(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, TException {
+     return client.exchange_partition(partitionSpecs, sourceDb, sourceTable,
+         destDb, destinationTableName);
+   }
+ 
+   /**
+    * Exchange partitions between two tables.
+    * @param partitionSpecs partition specs of the partitions to be exchanged
+    * @param sourceDb the db of the source table
+    * @param sourceTable the source table name
+    * @param destDb the db of the destination table
+    * @param destinationTableName the destination table name
+    * @return the new partitions after the exchange
+    */
+   @Override
+   public List<Partition> exchange_partitions(Map<String, String> partitionSpecs,
+       String sourceDb, String sourceTable, String destDb,
+       String destinationTableName) throws MetaException,
+       NoSuchObjectException, InvalidObjectException, TException {
+     return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable,
+         destDb, destinationTableName);
+   }
+ 
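A minimal sketch of the exchange call (not part of this commit); the map keys are partition column names, and the database/table names are hypothetical:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class ExchangePartitionSketch {
      static Partition promote(IMetaStoreClient msc) throws Exception {
        // Selects the ds=2018-07-13 partition of staging_db.events_staging and
        // moves it to prod_db.events (both tables need compatible layouts).
        Map<String, String> spec = new HashMap<>();
        spec.put("ds", "2018-07-13");
        return msc.exchange_partition(spec, "staging_db", "events_staging",
            "prod_db", "events");
      }
    }
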
+   @Override
+   public void validatePartitionNameCharacters(List<String> partVals)
+       throws TException, MetaException {
+     client.partition_name_has_valid_characters(partVals, true);
+   }
+ 
+   /**
+    * Create a new Database
+    * @param db
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database)
+    */
+   @Override
+   public void createDatabase(Database db)
+       throws AlreadyExistsException, InvalidObjectException, MetaException, TException {
+     client.create_database(db);
+   }
+ 
+   /**
+    * @param tbl
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table)
+    */
+   @Override
+   public void createTable(Table tbl) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     createTable(tbl, null);
+   }
+ 
+   public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, NoSuchObjectException, TException {
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       create_table_with_environment_context(tbl, envContext);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     }
+     finally {
+       if (!success && (hook != null)) {
+         try {
+           hook.rollbackCreateTable(tbl);
+         } catch (Exception e) {
+           LOG.error("Create rollback failed with", e);
+         }
+       }
+     }
+   }
+ 
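The create path above runs a three-phase hook protocol: preCreateTable before the RPC, commitCreateTable on success, rollbackCreateTable on failure. A skeletal hook might look like the following sketch (not part of this commit; wiring it up through the HiveMetaHookLoader is elided):

    import org.apache.hadoop.hive.metastore.HiveMetaHook;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hadoop.hive.metastore.api.Table;

    public class SketchMetaHook implements HiveMetaHook {
      @Override public void preCreateTable(Table table) throws MetaException {
        // Validate or provision external storage before the metastore RPC runs.
      }
      @Override public void commitCreateTable(Table table) throws MetaException {
        // Finalize external side effects once the metastore RPC succeeded.
      }
      @Override public void rollbackCreateTable(Table table) throws MetaException {
        // Undo preCreateTable side effects after a failed metastore RPC.
      }
      @Override public void preDropTable(Table table) throws MetaException { }
      @Override public void commitDropTable(Table table, boolean deleteData) throws MetaException { }
      @Override public void rollbackDropTable(Table table) throws MetaException { }
    }
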
+   @Override
+   public void createTableWithConstraints(Table tbl,
+       List<SQLPrimaryKey> primaryKeys, List<SQLForeignKey> foreignKeys,
+       List<SQLUniqueConstraint> uniqueConstraints,
+       List<SQLNotNullConstraint> notNullConstraints,
+       List<SQLDefaultConstraint> defaultConstraints,
+       List<SQLCheckConstraint> checkConstraints)
+         throws AlreadyExistsException, InvalidObjectException,
+         MetaException, NoSuchObjectException, TException {
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preCreateTable(tbl);
+     }
+     boolean success = false;
+     try {
+       // Subclasses can override this step (for example, for temporary tables)
+       client.create_table_with_constraints(tbl, primaryKeys, foreignKeys,
+           uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints);
+       if (hook != null) {
+         hook.commitCreateTable(tbl);
+       }
+       success = true;
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackCreateTable(tbl);
+       }
+     }
+   }
+ 
+   @Override
+   public void dropConstraint(String dbName, String tableName, String constraintName) throws
+     NoSuchObjectException, MetaException, TException {
+     client.drop_constraint(new DropConstraintRequest(dbName, tableName, constraintName));
+   }
+ 
+   @Override
+   public void addPrimaryKey(List<SQLPrimaryKey> primaryKeyCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_primary_key(new AddPrimaryKeyRequest(primaryKeyCols));
+   }
+ 
+   @Override
+   public void addForeignKey(List<SQLForeignKey> foreignKeyCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_foreign_key(new AddForeignKeyRequest(foreignKeyCols));
+   }
+ 
+   @Override
+   public void addUniqueConstraint(List<SQLUniqueConstraint> uniqueConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_unique_constraint(new AddUniqueConstraintRequest(uniqueConstraintCols));
+   }
+ 
+   @Override
+   public void addNotNullConstraint(List<SQLNotNullConstraint> notNullConstraintCols) throws
+     NoSuchObjectException, MetaException, TException {
+     client.add_not_null_constraint(new AddNotNullConstraintRequest(notNullConstraintCols));
+   }
+ 
+   @Override
+   public void addDefaultConstraint(List<SQLDefaultConstraint> defaultConstraints) throws
+       NoSuchObjectException, MetaException, TException {
+     client.add_default_constraint(new AddDefaultConstraintRequest(defaultConstraints));
+   }
+ 
+   @Override
+   public void addCheckConstraint(List<SQLCheckConstraint> checkConstraints) throws MetaException,
+       NoSuchObjectException, TException {
+     client.add_check_constraint(new AddCheckConstraintRequest(checkConstraints));
+   }
+ 
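A minimal sketch of adding one of the constraints above (not part of this commit). The setter names follow Thrift's javabean generation for the SQLNotNullConstraint fields; the database, table, column, and constraint names are hypothetical:

    import java.util.Collections;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;

    public class NotNullConstraintSketch {
      static void addNotNull(IMetaStoreClient msc) throws Exception {
        SQLNotNullConstraint nn = new SQLNotNullConstraint();
        nn.setTable_db("prod_db");
        nn.setTable_name("events");
        nn.setColumn_name("event_id");
        nn.setNn_name("events_event_id_nn");   // constraint name
        nn.setEnable_cstr(true);               // enforce the constraint
        nn.setValidate_cstr(false);            // don't validate existing rows
        nn.setRely_cstr(true);                 // optimizer may rely on it
        msc.addNotNullConstraint(Collections.singletonList(nn));
      }
    }
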
+   /**
+    * @param type
+    * @return true if the type was created
+    * @throws AlreadyExistsException
+    * @throws InvalidObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type)
+    */
+   public boolean createType(Type type) throws AlreadyExistsException,
+       InvalidObjectException, MetaException, TException {
+     return client.create_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @throws NoSuchObjectException
+    * @throws InvalidOperationException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean)
+    */
+   @Override
+   public void dropDatabase(String name)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(name, true, false, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     dropDatabase(name, deleteData, ignoreUnknownDb, false);
+   }
+ 
+   @Override
+   public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade)
+       throws NoSuchObjectException, InvalidOperationException, MetaException, TException {
+     try {
+       getDatabase(name);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownDb) {
+         throw e;
+       }
+       return;
+     }
+ 
+     if (cascade) {
+       List<String> tableList = getAllTables(name);
+       for (String table : tableList) {
+         try {
+           // Subclasses can override this step (for example, for temporary tables)
+           dropTable(name, table, deleteData, true);
+         } catch (UnsupportedOperationException e) {
+           // Ignore index tables; those will be dropped with their parent tables
+         }
+       }
+     }
+     client.drop_database(name, deleteData, cascade);
+   }
+ 
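A one-call usage sketch (not part of this commit); note from the code above that cascade=true drops each table first, so table-level HiveMetaHooks still fire:

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;

    public class DropDatabaseSketch {
      static void dropScratch(IMetaStoreClient msc) throws Exception {
        msc.dropDatabase("scratch_db",   // hypothetical database
            true,                        // deleteData: remove the files too
            true,                        // ignoreUnknownDb: no-op if absent
            true);                       // cascade: drop contained tables first
      }
    }
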
+   /**
+    * @param db_name
+    * @param tbl_name
+    * @param part_vals
+    * @return true if the partition was dropped
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+    *      java.lang.String, java.util.List, boolean)
+    */
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals) throws NoSuchObjectException, MetaException,
+       TException {
+     return dropPartition(db_name, tbl_name, part_vals, true, null);
+   }
+ 
+   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+       EnvironmentContext env_context) throws NoSuchObjectException, MetaException, TException {
+     return dropPartition(db_name, tbl_name, part_vals, true, env_context);
+   }
+ 
+   @Override
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData)
+       throws NoSuchObjectException, MetaException, TException {
+     return dropPartition(dbName, tableName, partName, deleteData, null);
+   }
+ 
+   private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() {
+     Map<String, String> warehouseOptions = new HashMap<>();
+     warehouseOptions.put("ifPurge", "TRUE");
+     return new EnvironmentContext(warehouseOptions);
+   }
+ 
+   /*
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, boolean ifPurge)
+       throws NoSuchObjectException, MetaException, TException {
+ 
+     return dropPartition(dbName, tableName, partName, deleteData,
+                          ifPurge? getEnvironmentContextWithIfPurgeSet() : null);
+   }
+   */
+ 
+   public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData,
+       EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException {
+     return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+         deleteData, envContext);
+   }
+ 
+   /**
+    * @param db_name
+    * @param tbl_name
+    * @param part_vals
+    * @param deleteData
+    *          delete the underlying data or just delete the table in metadata
+    * @return true if the partition was dropped
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String,
+    *      java.lang.String, java.util.List, boolean)
+    */
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, boolean deleteData) throws NoSuchObjectException,
+       MetaException, TException {
+     return dropPartition(db_name, tbl_name, part_vals, deleteData, null);
+   }
+ 
+   @Override
+   public boolean dropPartition(String db_name, String tbl_name,
+       List<String> part_vals, PartitionDropOptions options) throws TException {
+     return dropPartition(db_name, tbl_name, part_vals, options.deleteData,
+                          options.purgeData? getEnvironmentContextWithIfPurgeSet() : null);
+   }
+ 
+   public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals,
+       boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+       MetaException, TException {
+     return client.drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData,
+         envContext);
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+                                         List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions options)
+       throws TException {
+     RequestPartsSpec rps = new RequestPartsSpec();
+     List<DropPartitionsExpr> exprs = new ArrayList<>(partExprs.size());
+     for (ObjectPair<Integer, byte[]> partExpr : partExprs) {
+       DropPartitionsExpr dpe = new DropPartitionsExpr();
+       dpe.setExpr(partExpr.getSecond());
+       dpe.setPartArchiveLevel(partExpr.getFirst());
+       exprs.add(dpe);
+     }
+     rps.setExprs(exprs);
+     DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps);
+     req.setDeleteData(options.deleteData);
+     req.setNeedResult(options.returnResults);
+     req.setIfExists(options.ifExists);
+     if (options.purgeData) {
+       LOG.info("Dropped partitions will be purged!");
+       req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet());
+     }
+     return client.drop_partitions_req(req).getPartitions();
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException {
+ 
+     return dropPartitions(dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists)
+                                               .returnResults(needResult));
+ 
+   }
+ 
+   @Override
+   public List<Partition> dropPartitions(String dbName, String tblName,
+       List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData,
+       boolean ifExists) throws NoSuchObjectException, MetaException, TException {
+     // By default, we need the results from dropPartitions();
+     return dropPartitions(dbName, tblName, partExprs,
+                           PartitionDropOptions.instance()
+                                               .deleteData(deleteData)
+                                               .ifExists(ifExists));
+   }
+ 
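A sketch of the options-based drop (not part of this commit). The serialized partition expression normally comes from the client-side expression proxy and is passed through opaquely here; the ObjectPair import path is an assumption:

    import java.util.Arrays;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.PartitionDropOptions;
    import org.apache.hadoop.hive.metastore.utils.ObjectPair;

    public class DropPartitionsSketch {
      static void dropMatching(IMetaStoreClient msc, byte[] serializedExpr) throws Exception {
        msc.dropPartitions("prod_db", "events",
            Arrays.asList(new ObjectPair<>(0, serializedExpr)), // 0 = no archive level
            PartitionDropOptions.instance()
                .deleteData(true)    // remove the data files as well
                .ifExists(true)      // don't fail when nothing matches
                .purgeData(true));   // sets the "ifPurge" context shown above
      }
    }
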
+   /**
+    * {@inheritDoc}
+    * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+    */
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     dropTable(dbname, name, deleteData, ignoreUnknownTab, null);
+   }
+ 
+   /**
+    * Drop the table and choose whether to save the data in the trash.
+    * @param ifPurge completely purge the table (skipping trash) while removing
+    *                data from warehouse
+    * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+    */
+   @Override
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, boolean ifPurge)
+       throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException {
+     // build a new EnvironmentContext when ifPurge is requested
+     EnvironmentContext envContext = ifPurge ? getEnvironmentContextWithIfPurgeSet() : null;
+     dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext);
+   }
+ 
+   /**
+    * @see #dropTable(String, String, boolean, boolean, EnvironmentContext)
+    */
+   @Override
+   public void dropTable(String dbname, String name)
+       throws NoSuchObjectException, MetaException, TException {
+     dropTable(dbname, name, true, true, null);
+   }
+ 
+   /**
+    * Drop the table and choose whether to: delete the underlying table data;
+    * throw if the table doesn't exist; save the data in the trash.
+    *
+    * @param dbname
+    * @param name
+    * @param deleteData
+    *          delete the underlying data or just delete the table in metadata
+    * @param ignoreUnknownTab
+    *          don't throw if the requested table doesn't exist
+    * @param envContext
+    *          for communicating with thrift
+    * @throws MetaException
+    *           could not drop table properly
+    * @throws NoSuchObjectException
+    *           the table wasn't found
+    * @throws TException
+    *           a thrift communication error occurred
+    * @throws UnsupportedOperationException
+    *           dropping an index table is not allowed
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String,
+    *      java.lang.String, boolean)
+    */
+   public void dropTable(String dbname, String name, boolean deleteData,
+       boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException,
+       NoSuchObjectException, UnsupportedOperationException {
+     Table tbl;
+     try {
+       tbl = getTable(dbname, name);
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+       return;
+     }
+     HiveMetaHook hook = getHook(tbl);
+     if (hook != null) {
+       hook.preDropTable(tbl);
+     }
+     boolean success = false;
+     try {
+       drop_table_with_environment_context(dbname, name, deleteData, envContext);
+       if (hook != null) {
+         hook.commitDropTable(tbl, deleteData || (envContext != null && "TRUE".equals(envContext.getProperties().get("ifPurge"))));
+       }
+       success = true;
+     } catch (NoSuchObjectException e) {
+       if (!ignoreUnknownTab) {
+         throw e;
+       }
+     } finally {
+       if (!success && (hook != null)) {
+         hook.rollbackDropTable(tbl);
+       }
+     }
+   }
+ 
+   /**
+    * Truncate the table/partitions in the given database.
+    * @param dbName
+    *          The db to which the table to be truncated belongs
+    * @param tableName
+    *          The table to truncate
+    * @param partNames
+    *          List of partitions to truncate. NULL will truncate the whole table/all partitions
+    * @throws MetaException
+    * @throws TException
+    *           Could not truncate table properly.
+    */
+   @Override
+   public void truncateTable(String dbName, String tableName, List<String> partNames) throws MetaException, TException {
+     client.truncate_table(dbName, tableName, partNames);
+   }
+ 
+   /**
+    * Recycles the files recursively from the input path to the cmroot directory, either by copying or moving them.
+    *
+    * @param request holds the path of the data files to be recycled to cmroot and
+    *                the isPurge flag; when the flag is set to true, the recycled files are not moved to Trash
+    * @return Response, which is currently void
+    */
+   @Override
+   public CmRecycleResponse recycleDirToCmPath(CmRecycleRequest request) throws MetaException, TException {
+     return client.cm_recycle(request);
+   }
+ 
+   /**
+    * @param type
+    * @return true if the type is dropped
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String)
+    */
+   public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException {
+     return client.drop_type(type);
+   }
+ 
+   /**
+    * @param name
+    * @return map of types
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String)
+    */
+   public Map<String, Type> getTypeAll(String name) throws MetaException,
+       TException {
+     Map<String, Type> result = null;
+     Map<String, Type> fromClient = client.get_type_all(name);
+     if (fromClient != null) {
+       result = new LinkedHashMap<>();
+       for (String key : fromClient.keySet()) {
+         result.put(key, deepCopy(fromClient.get(key)));
+       }
+     }
+     return result;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getDatabases(String databasePattern)
+     throws MetaException {
+     try {
+       return filterHook.filterDatabases(client.get_databases(databasePattern));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getAllDatabases() throws MetaException {
+     try {
+       return filterHook.filterDatabases(client.get_all_databases());
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /**
+    * @param db_name
+    * @param tbl_name
+    * @param max_parts
+    * @return list of partitions
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    */
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name,
+       short max_parts) throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions(db_name, tbl_name, max_parts);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_partitions_pspec(dbName, tableName, maxParts)));
+   }
+ 
+   @Override
+   public List<Partition> listPartitions(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name,
+       String tbl_name, short max_parts, String user_name, List<String> group_names)
+        throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts,
+         user_name, group_names);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public List<Partition> listPartitionsWithAuthInfo(String db_name,
+       String tbl_name, List<String> part_vals, short max_parts,
+       String user_name, List<String> group_names) throws NoSuchObjectException,
+       MetaException, TException {
+     List<Partition> parts = client.get_partitions_ps_with_auth(db_name,
+         tbl_name, part_vals, max_parts, user_name, group_names);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   /**
+    * Get list of partitions matching specified filter
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @param max_parts the maximum number of partitions to return,
+    *    all partitions are returned if -1 is passed
+    * @return list of partitions
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    */
+   @Override
+   public List<Partition> listPartitionsByFilter(String db_name, String tbl_name,
+       String filter, short max_parts) throws MetaException,
+          NoSuchObjectException, TException {
+     List<Partition> parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
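A usage sketch for the filter API (not part of this commit); the filter grammar only works on string partition keys, per the javadoc above, and -1 returns all matches:

    import java.util.List;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Partition;

    public class FilterPartitionsSketch {
      static List<Partition> recentUs(IMetaStoreClient msc) throws Exception {
        return msc.listPartitionsByFilter("prod_db", "events",
            "ds >= \"2018-07-01\" and region = \"us\"",   // string keys only
            (short) -1);                                  // -1 = no limit
      }
    }
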
+   @Override
+   public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name,
+                                                        String filter, int max_parts) throws MetaException,
+          NoSuchObjectException, TException {
+     return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs(
+         client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)));
+   }
+ 
+   @Override
+   public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr,
+       String default_partition_name, short max_parts, List<Partition> result)
+           throws TException {
+     assert result != null;
+     PartitionsByExprRequest req = new PartitionsByExprRequest(
+         db_name, tbl_name, ByteBuffer.wrap(expr));
+     if (default_partition_name != null) {
+       req.setDefaultPartitionName(default_partition_name);
+     }
+     if (max_parts >= 0) {
+       req.setMaxParts(max_parts);
+     }
+     PartitionsByExprResult r;
+     try {
+       r = client.get_partitions_by_expr(req);
+     } catch (TApplicationException te) {
+       // TODO: backward compat for Hive <= 0.12. Can be removed later.
+       if (te.getType() != TApplicationException.UNKNOWN_METHOD
+           && te.getType() != TApplicationException.WRONG_METHOD_NAME) {
+         throw te;
+       }
+       throw new IncompatibleMetastoreException(
+           "Metastore doesn't support listPartitionsByExpr: " + te.getMessage());
+     }
+     if (fastpath) {
+       result.addAll(r.getPartitions());
+     } else {
+       r.setPartitions(filterHook.filterPartitions(r.getPartitions()));
+       // TODO: in these methods, do we really need to deepcopy?
+       deepCopyPartitions(r.getPartitions(), result);
+     }
+     return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst.
+   }
+ 
+   /**
+    * @param name
+    * @return the database
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String)
+    */
+   @Override
+   public Database getDatabase(String name) throws NoSuchObjectException,
+       MetaException, TException {
+     Database d = client.get_database(name);
+     return fastpath ? d : deepCopy(filterHook.filterDatabase(d));
+   }
+ 
+   /**
+    * @param db_name
+    * @param tbl_name
+    * @param part_vals
+    * @return the partition
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String,
+    *      java.lang.String, java.util.List)
+    */
+   @Override
+   public Partition getPartition(String db_name, String tbl_name,
+       List<String> part_vals) throws NoSuchObjectException, MetaException, TException {
+     Partition p = client.get_partition(db_name, tbl_name, part_vals);
+     return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   @Override
+   public List<Partition> getPartitionsByNames(String db_name, String tbl_name,
+       List<String> part_names) throws NoSuchObjectException, MetaException, TException {
+     List<Partition> parts = client.get_partitions_by_names(db_name, tbl_name, part_names);
+     return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts));
+   }
+ 
+   @Override
+   public PartitionValuesResponse listPartitionValues(PartitionValuesRequest request)
+       throws MetaException, TException, NoSuchObjectException {
+     return client.get_partition_values(request);
+   }
+ 
+   @Override
+   public Partition getPartitionWithAuthInfo(String db_name, String tbl_name,
+       List<String> part_vals, String user_name, List<String> group_names)
+       throws MetaException, UnknownTableException, NoSuchObjectException,
+       TException {
+     Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name,
+         group_names);
+     return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   /**
+    * @param dbname
+    * @param name
+    * @return the table
+    * @throws NoSuchObjectException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String,
+    *      java.lang.String)
+    */
+   @Override
+   public Table getTable(String dbname, String name) throws MetaException,
+       TException, NoSuchObjectException {
+     GetTableRequest req = new GetTableRequest(dbname, name);
+     req.setCapabilities(version);
+     Table t = client.get_table_req(req).getTable();
+     return fastpath ? t : deepCopy(filterHook.filterTable(t));
+   }
+ 
++  @Override
++  public Table getTable(String dbName, String tableName, long txnId, String validWriteIdList)
++      throws MetaException, TException, NoSuchObjectException {
++    GetTableRequest req = new GetTableRequest(dbName, tableName);
++    req.setCapabilities(version);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(validWriteIdList);
++    Table t = client.get_table_req(req).getTable();
++    return fastpath ? t : deepCopy(filterHook.filterTable(t));
++  }
++
+   /** {@inheritDoc} */
+   @Override
+   public List<Table> getTableObjectsByName(String dbName, List<String> tableNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     GetTablesRequest req = new GetTablesRequest(dbName);
+     req.setTblNames(tableNames);
+     req.setCapabilities(version);
+     List<Table> tabs = client.get_table_objects_by_name_req(req).getTables();
+     return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs));
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public Map<String, Materialization> getMaterializationsInvalidationInfo(String dbName, List<String> viewNames)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     return client.get_materialization_invalidation_info(
+         dbName, filterHook.filterTableNames(null, dbName, viewNames));
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public void updateCreationMetadata(String dbName, String tableName, CreationMetadata cm)
+       throws MetaException, InvalidOperationException, UnknownDBException, TException {
+     client.update_creation_metadata(null, dbName, tableName, cm);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables)
+       throws MetaException, TException, InvalidOperationException, UnknownDBException {
+     return filterHook.filterTableNames(null, dbName,
+         client.get_table_names_by_filter(dbName, filter, maxTables));
+   }
+ 
+   /**
+    * @param name
+    * @return the type
+    * @throws MetaException
+    * @throws TException
+    * @throws NoSuchObjectException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String)
+    */
+   public Type getType(String name) throws NoSuchObjectException, MetaException, TException {
+     return deepCopy(client.get_type(name));
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getTables(String dbname, String tablePattern) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname, client.get_tables(dbname, tablePattern));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getTables(String dbname, String tablePattern, TableType tableType) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname,
+           client.get_tables_by_type(dbname, tablePattern, tableType.toString()));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getMaterializedViewsForRewriting(String dbname) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname, client.get_materialized_views_for_rewriting(dbname));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes)
+       throws MetaException {
+     try {
+       return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   private List<TableMeta> filterNames(List<TableMeta> metas) throws MetaException {
+     Map<String, TableMeta> sources = new LinkedHashMap<>();
+     Map<String, List<String>> dbTables = new LinkedHashMap<>();
+     for (TableMeta meta : metas) {
+       sources.put(meta.getDbName() + "." + meta.getTableName(), meta);
+       List<String> tables = dbTables.get(meta.getDbName());
+       if (tables == null) {
+         dbTables.put(meta.getDbName(), tables = new ArrayList<>());
+       }
+       tables.add(meta.getTableName());
+     }
+     List<TableMeta> filtered = new ArrayList<>();
+     for (Map.Entry<String, List<String>> entry : dbTables.entrySet()) {
+       for (String table : filterHook.filterTableNames(null, entry.getKey(), entry.getValue())) {
+         filtered.add(sources.get(entry.getKey() + "." + table));
+       }
+     }
+     return filtered;
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<String> getAllTables(String dbname) throws MetaException {
+     try {
+       return filterHook.filterTableNames(null, dbname, client.get_all_tables(dbname));
+     } catch (Exception e) {
+       MetaStoreUtils.logAndThrowMetaException(e);
+     }
+     return null;
+   }
+ 
+   @Override
+   public boolean tableExists(String databaseName, String tableName) throws MetaException,
+       TException, UnknownDBException {
+     try {
+       GetTableRequest req = new GetTableRequest(databaseName, tableName);
+       req.setCapabilities(version);
+       return filterHook.filterTable(client.get_table_req(req).getTable()) != null;
+     } catch (NoSuchObjectException e) {
+       return false;
+     }
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String dbName, String tblName,
+       short max) throws NoSuchObjectException, MetaException, TException {
+     return filterHook.filterPartitionNames(null, dbName, tblName,
+         client.get_partition_names(dbName, tblName, max));
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String db_name, String tbl_name,
+       List<String> part_vals, short max_parts)
+       throws MetaException, TException, NoSuchObjectException {
+     return filterHook.filterPartitionNames(null, db_name, tbl_name,
+         client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts));
+   }
+ 
+   /**
+    * Get number of partitions matching specified filter
+    * @param db_name the database name
+    * @param tbl_name the table name
+    * @param filter the filter string,
+    *    for example "part1 = \"p1_abc\" and part2 &lt;= \"p2_test\"". Filtering can
+    *    be done only on string partition keys.
+    * @return number of partitions
+    * @throws MetaException
+    * @throws NoSuchObjectException
+    * @throws TException
+    */
+   @Override
+   public int getNumPartitionsByFilter(String db_name, String tbl_name,
+                                       String filter) throws MetaException,
+           NoSuchObjectException, TException {
+     return client.get_num_partitions_by_filter(db_name, tbl_name, filter);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart)
+       throws InvalidOperationException, MetaException, TException {
+     client.alter_partition_with_environment_context(dbName, tblName, newPart, null);
+   }
+ 
+   @Override
+   public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext)
+       throws InvalidOperationException, MetaException, TException {
+     client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts)
+       throws InvalidOperationException, MetaException, TException {
 -    client.alter_partitions_with_environment_context(dbName, tblName, newParts, null);
++    client.alter_partitions(dbName, tblName, newParts);
+   }
+ 
+   @Override
+   public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext)
+   throws InvalidOperationException, MetaException, TException {
 -    client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext);
++    AlterPartitionsRequest req = new AlterPartitionsRequest();
++    req.setDbName(dbName);
++    req.setTableName(tblName);
++    req.setPartitions(newParts);
++    req.setEnvironmentContext(environmentContext);
++    // TODO: this is ugly... account for ability to pass via EC for the old API.
++    if (environmentContext != null && environmentContext.isSetProperties()
++        && environmentContext.getProperties().containsKey(StatsSetupConst.VALID_WRITE_IDS)) {
++      req.setTxnId(Long.parseLong(environmentContext.getProperties().get(StatsSetupConst.TXN_ID)));
++      req.setValidWriteIdList(environmentContext.getProperties().get(StatsSetupConst.VALID_WRITE_IDS));
++    }
++    client.alter_partitions_with_environment_context_req(req);
++  }
++
++  @Override
++  public void alter_partitions(String dbName, String tblName, List<Partition> newParts,
++                               EnvironmentContext environmentContext,
++                               long txnId, String writeIdList, long writeId)
++      throws InvalidOperationException, MetaException, TException {
++    AlterPartitionsRequest req = new AlterPartitionsRequest();
++    req.setDbName(dbName);
++    req.setTableName(tblName);
++    req.setPartitions(newParts);
++    req.setEnvironmentContext(environmentContext);
++    req.setTxnId(txnId);
++    req.setValidWriteIdList(writeIdList);
++    client.alter_partitions_with_environment_context_req(req);
+   }
+ 
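A sketch of the legacy path the TODO above refers to (not part of this commit): a caller on the old two-argument API can tunnel the transactional context through the EnvironmentContext using the same StatsSetupConst keys the code reads back out. The StatsSetupConst import path is an assumption here:

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.hive.common.StatsSetupConst;
    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    public class TxnContextSketch {
      static EnvironmentContext txnContext(long txnId, String validWriteIdList) {
        // These two properties are read back by the new alter_partitions code above.
        Map<String, String> props = new HashMap<>();
        props.put(StatsSetupConst.TXN_ID, Long.toString(txnId));
        props.put(StatsSetupConst.VALID_WRITE_IDS, validWriteIdList);
        return new EnvironmentContext(props);
      }
    }
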
+   @Override
+   public void alterDatabase(String dbName, Database db)
+       throws MetaException, NoSuchObjectException, TException {
+     client.alter_database(dbName, db);
+   }
+   /**
+    * @param db
+    * @param tableName
+    * @throws UnknownTableException
+    * @throws UnknownDBException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String,
+    *      java.lang.String)
+    */
+   @Override
+   public List<FieldSchema> getFields(String db, String tableName)
+       throws MetaException, TException, UnknownTableException,
+       UnknownDBException {
+     List<FieldSchema> fields = client.get_fields(db, tableName);
+     return fastpath ? fields : deepCopyFieldSchemas(fields);
+   }
+ 
+   @Override
+   public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     return client.get_primary_keys(req).getPrimaryKeys();
+   }
+ 
+   @Override
+   public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest req) throws MetaException,
+     NoSuchObjectException, TException {
+     return client.get_foreign_keys(req).getForeignKeys();
+   }
+ 
+   @Override
+   public List<SQLUniqueConstraint> getUniqueConstraints(UniqueConstraintsRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     return client.get_unique_constraints(req).getUniqueConstraints();
+   }
+ 
+   @Override
+   public List<SQLNotNullConstraint> getNotNullConstraints(NotNullConstraintsRequest req)
+     throws MetaException, NoSuchObjectException, TException {
+     return client.get_not_null_constraints(req).getNotNullConstraints();
+   }
+ 
+   @Override
+   public List<SQLDefaultConstraint> getDefaultConstraints(DefaultConstraintsRequest req)
+       throws MetaException, NoSuchObjectException, TException {
+     return client.get_default_constraints(req).getDefaultConstraints();
+   }
+ 
+   @Override
+   public List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest request) throws
+       MetaException, NoSuchObjectException, TException {
+     return client.get_check_constraints(request).getCheckConstraints();
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   @Deprecated
+   //use setPartitionColumnStatistics instead
+   public boolean updateTableColumnStatistics(ColumnStatistics statsObj)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException {
+     return client.update_table_column_statistics(statsObj);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   @Deprecated
+   //use setPartitionColumnStatistics instead
+   public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException {
+     return client.update_partition_column_statistics(statsObj);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request)
+     throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+     InvalidInputException {
+     return client.set_aggr_stats_for(request);
+   }
+ 
+   @Override
+   public void flushCache() {
+     try {
+       client.flushCache();
+     } catch (TException e) {
+       // Not much we can do about it honestly
+       LOG.warn("Got error flushing the cache", e);
+     }
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public List<ColumnStatisticsObj> getTableColumnStatistics(String dbName, String tableName,
+       List<String> colNames) throws NoSuchObjectException, MetaException, TException,
+       InvalidInputException, InvalidObjectException {
+     return client.get_table_statistics_req(
+         new TableStatsRequest(dbName, tableName, colNames)).getTableStats();
+   }
+ 
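A usage sketch for the stats call above (not part of this commit); the column names are hypothetical:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

    public class TableStatsSketch {
      static List<ColumnStatisticsObj> fetch(IMetaStoreClient msc) throws Exception {
        // One ColumnStatisticsObj per requested column that has stats recorded.
        return msc.getTableColumnStatistics("prod_db", "events",
            Arrays.asList("event_id", "ds"));
      }
    }
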
++  @Override
++  public List<ColumnStatisticsObj> getTableColumnStatistics(
++      String dbName, String tableName, List<String> colNames, long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    TableStatsRequest tsr = new TableStatsRequest(dbName, tableName, colNames);
++    tsr.setTxnId(txnId);
++    tsr.setValidWriteIdList(validWriteIdList);
++
++    return client.get_table_statistics_req(tsr).getTableStats();
++  }
++
+   /** {@inheritDoc} */
+   @Override
+   public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
+       String dbName, String tableName, List<String> partNames, List<String> colNames)
+           throws NoSuchObjectException, MetaException, TException {
+     return client.get_partitions_statistics_req(
+         new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats();
+   }
+ 
++  @Override
++  public Map<String, List<ColumnStatisticsObj>> getPartitionColumnStatistics(
++      String dbName, String tableName, List<String> partNames,
++      List<String> colNames, long txnId, String validWriteIdList)
++      throws NoSuchObjectException, MetaException, TException {
++    PartitionsStatsRequest psr = new PartitionsStatsRequest(dbName, tableName, colNames, partNames);
++    psr.setTxnId(txnId);
++    psr.setValidWriteIdList(validWriteIdList);
++    return client.get_partitions_statistics_req(psr).getPartStats();
++  }
++
+   /** {@inheritDoc} */
+   @Override
+   public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName,
+       String colName) throws NoSuchObjectException, InvalidObjectException, MetaException,
+       TException, InvalidInputException {
+     return client.delete_partition_column_statistics(dbName, tableName, partName, colName);
+   }
+ 
+   /** {@inheritDoc} */
+   @Override
+   public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName)
+       throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+       InvalidInputException {
+     return client.delete_table_column_statistics(dbName, tableName, colName);
+   }
+ 
+   /**
+    * @param db
+    * @param tableName
+    * @throws UnknownTableException
+    * @throws UnknownDBException
+    * @throws MetaException
+    * @throws TException
+    * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String,
+    *      java.lang.String)
+    */
+   @Override
+   public List<FieldSchema> getSchema(String db, String tableName)
+       throws MetaException, TException, UnknownTableException,
+       UnknownDBException {
+     EnvironmentContext envCxt = null;
+     String addedJars = MetastoreConf.getVar(conf, ConfVars.ADDED_JARS);
+     if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) {
+       Map<String, String> props = new HashMap<>();
+       props.put("hive.added.jars.path", addedJars);
+       envCxt = new EnvironmentContext(props);
+     }
+ 
+     List<FieldSchema> fields = client.get_schema_with_environment_context(db, tableName, envCxt);
+     return fastpath ? fields : deepCopyFieldSchemas(fields);
+   }
+ 
+   @Override
+   public String getConfigValue(String name, String defaultValue)
+       throws TException, ConfigValSecurityException {
+     return client.get_config_value(name, defaultValue);
+   }
+ 
+   @Override
+   public Partition getPartition(String db, String tableName, String partName)
+       throws MetaException, TException, UnknownTableException, NoSuchObjectException {
+     Partition p = client.get_partition_by_name(db, tableName, partName);
+     return fastpath ? p : deepCopy(filterHook.filterPartition(p));
+   }
+ 
+   public Partition appendPartitionByName(String dbName, String tableName, String partName)
+       throws InvalidObjectException, AlreadyExistsException, MetaException, TException {
+     return appendPartitionByName(dbName, tableName, partName, null);
+   }
+ 
+   public Partition appendPartitionByName(String dbName, String tableName, String partName,
+       EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException,
+       MetaException, TException {
+     Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName,
+         partName, envContext);
+     return fastpath ? p : deepCopy(p);
+   }
+ 
+   public boolean dropPartitionByName(String dbName, String tableName, String partName,
+       boolean deleteData) throws NoSuchObjectException, MetaException, TException {
+     return dropPartitionByName(dbName, tableName, partName, deleteData, null);
+   }
+ 
+   public boolean dropPartitionByName(String dbName, String tableName, String partName,
+       boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException,
+       MetaException, TException {
+     return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName,
+         deleteData, envContext);
+   }
+ 
+   private HiveMetaHook getHook(Table tbl) throws MetaException {
+     if (hookLoader == null) {
+       return null;
+     }
+     return hookLoader.getHook(tbl);
+   }
+ 
+   @Override
+   public List<String> partitionNameToVals(String name) throws MetaException, TException {
+     return client.partition_name_to_vals(name);
+   }
+ 
+   @Override
+   public Map<String, String> partitionNameToSpec(String name) throws MetaException, TException{
+     return client.partition_name_to_spec(name);
+   }
+ 
+   /**
+    * @param partition the partition to copy
+    * @return a deep copy of the partition, or null if the input is null
+    */
+   private Partition deepCopy(Partition partition) {
+     Partition copy = null;
+     if (partition != null) {
+       copy = new Partition(partition);
+     }
+     return copy;
+   }
+ 
+   private Database deepCopy(Database database) {
+     Database copy = null;
+     if (database != null) {
+       copy = new Database(database);
+     }
+     return copy;
+   }
+ 
+   protected Table deepCopy(Table table) {
+     Table copy = null;
+     if (table != null) {
+       copy = new Table(table);
+     }
+     return copy;
+   }
+ 
+   private Type deepCopy(Type type) {
+     Type copy = null;
+     if (type != null) {
+       copy = new Type(type);
+     }
+     return copy;
+   }
+ 
+   private FieldSchema deepCopy(FieldSchema schema) {
+     FieldSchema copy = null;
+     if (schema != null) {
+       copy = new FieldSchema(schema);
+     }
+     return copy;
+   }
+ 
+   private Function deepCopy(Function func) {
+     Function copy = null;
+     if (func != null) {
+       copy = new Function(func);
+     }
+     return copy;
+   }
+ 
+   protected PrincipalPrivilegeSet deepCopy(PrincipalPrivilegeSet pps) {
+     PrincipalPrivilegeSet copy = null;
+     if (pps != null) {
+       copy = new PrincipalPrivilegeSet(pps);
+     }
+     return copy;
+   }
+ 
+   private List<Partition> deepCopyPartitions(List<Partition> partitions) {
+     return deepCopyPartitions(partitions, null);
+   }
+ 
+   private List<Partition> deepCopyPartitions(
+       Collection<Partition> src, List<Partition> dest) {
+     if (src == null) {
+       return dest;
+     }
+     if (dest == null) {
+       dest = new ArrayList<Partition>(src.size());
+     }
+     for (Partition part : src) {
+       dest.add(deepCopy(part));
+     }
+     return dest;
+   }
+ 
+   private List<Table> deepCopyTables(List<Table> tables) {
+     List<Table> copy = null;
+     if (tables != null) {
+       copy = new ArrayList<Table>();
+       for (Table tab : tables) {
+         copy.add(deepCopy(tab));
+       }
+     }
+     return copy;
+   }
+ 
+   protected List<FieldSchema> deepCopyFieldSchemas(List<FieldSchema> schemas) {
+     List<FieldSchema> copy = null;
+     if (schemas != null) {
+       copy = new ArrayList<FieldSchema>();
+       for (FieldSchema schema : schemas) {
+         copy.add(deepCopy(schema));
+       }
+     }
+     return copy;
+   }
+ 
+   @Override
+   public boolean grant_role(String roleName, String userName,
+       PrincipalType principalType, String grantor, PrincipalType grantorType,
+       boolean grantOption) throws MetaException, TException {
+     GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+     req.setRequestType(GrantRevokeType.GRANT);
+     req.setRoleName(roleName);
+     req.setPrincipalName(userName);
+     req.setPrincipalType(principalType);
+     req.setGrantor(grantor);
+     req.setGrantorType(grantorType);
+     req.setGrantOption(grantOption);
+     GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
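A usage sketch of the grant/revoke request pattern above (not part of this commit); the principal and role names are hypothetical:

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.PrincipalType;

    public class GrantRoleSketch {
      static boolean grantAnalyst(IMetaStoreClient msc) throws Exception {
        return msc.grant_role("analyst", "alice", PrincipalType.USER,
            "admin", PrincipalType.ROLE,
            false);   // grantOption: alice may not re-grant the role
      }
    }
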
+   @Override
+   public boolean create_role(Role role)
+       throws MetaException, TException {
+     return client.create_role(role);
+   }
+ 
+   @Override
+   public boolean drop_role(String roleName) throws MetaException, TException {
+     return client.drop_role(roleName);
+   }
+ 
+   @Override
+   public List<Role> list_roles(String principalName,
+       PrincipalType principalType) throws MetaException, TException {
+     return client.list_roles(principalName, principalType);
+   }
+ 
+   @Override
+   public List<String> listRoleNames() throws MetaException, TException {
+     return client.get_role_names();
+   }
+ 
+   @Override
+   public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest req)
+       throws MetaException, TException {
+     return client.get_principals_in_role(req);
+   }
+ 
+   @Override
+   public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(
+       GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException {
+     return client.get_role_grants_for_principal(getRolePrincReq);
+   }
+ 
+   @Override
+   public boolean grant_privileges(PrivilegeBag privileges)
+       throws MetaException, TException {
+     GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+     req.setRequestType(GrantRevokeType.GRANT);
+     req.setPrivileges(privileges);
+     GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean revoke_role(String roleName, String userName,
+       PrincipalType principalType, boolean grantOption) throws MetaException, TException {
+     GrantRevokeRoleRequest req = new GrantRevokeRoleRequest();
+     req.setRequestType(GrantRevokeType.REVOKE);
+     req.setRoleName(roleName);
+     req.setPrincipalName(userName);
+     req.setPrincipalType(principalType);
+     req.setGrantOption(grantOption);
+     GrantRevokeRoleResponse res = client.grant_revoke_role(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException,
+       TException {
+     GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest();
+     req.setRequestType(GrantRevokeType.REVOKE);
+     req.setPrivileges(privileges);
+     req.setRevokeGrantOption(grantOption);
+     GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public boolean refresh_privileges(HiveObjectRef objToRefresh, String authorizer,
+       PrivilegeBag grantPrivileges) throws MetaException,
+       TException {
+     String defaultCat = getDefaultCatalog(conf);
+     objToRefresh.setCatName(defaultCat);
+ 
+     if (grantPrivileges.getPrivileges() != null) {
+       for (HiveObjectPrivilege priv : grantPrivileges.getPrivileges()) {
+         if (!priv.getHiveObject().isSetCatName()) {
+           priv.getHiveObject().setCatName(defaultCat);
+         }
+       }
+     }
+     GrantRevokePrivilegeRequest grantReq = new GrantRevokePrivilegeRequest();
+     grantReq.setRequestType(GrantRevokeType.GRANT);
+     grantReq.setPrivileges(grantPrivileges);
+ 
+     GrantRevokePrivilegeResponse res = client.refresh_privileges(objToRefresh, authorizer, grantReq);
+     if (!res.isSetSuccess()) {
+       throw new MetaException("GrantRevokePrivilegeResponse missing success field");
+     }
+     return res.isSuccess();
+   }
+ 
+   @Override
+   public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject,
+       String userName, List<String> groupNames) throws MetaException,
+       TException {
+     return client.get_privilege_set(hiveObject, userName, groupNames);
+   }
+ 
+   @Override
+   public List<HiveObjectPrivilege> list_privileges(String principalName,
+       PrincipalType principalType, HiveObjectRef hiveObject)
+       throws MetaException, TException {
+     return client.list_privileges(principalName, principalType, hiveObject);
+   }
+ 
+   public String getDelegationToken(String renewerKerberosPrincipalName) throws
+   MetaException, TException, IOException {
+     //a convenience method that makes the intended owner for the delegation
+     //token request the current user
+     String owner = SecurityUtils.getUser();
+     return getDelegationToken(owner, renewerKerberosPrincipalName);
+   }
+ 
+   @Override
+   public String getDelegationToken(String owner, String renewerKerberosPrincipalName) throws
+   MetaException, TException {
+     // This is expected to be a no-op, so we will return null when we use local metastore.
+     if (localMetaStore) {
+       return null;
+     }
+     return client.get_delegation_token(owner, renewerKerberosPrincipalName);
+   }
+ 
+   @Override
+   public long renewDelegationToken(String tokenStrForm) throws MetaException, TException {
+     if (localMetaStore) {
+       return 0;
+     }
+     return client.renew_delegation_token(tokenStrForm);
+   }
+ 
+   @Override
+   public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException {
+     if (localMetaStore) {
+       return;
+     }
+     client.cancel_delegation_token(tokenStrForm);
+   }
+ 
+   @Override
+   public boolean addToken(String tokenIdentifier, String delegationToken) throws TException {
+     return client.add_token(tokenIdentifier, delegationToken);
+   }
+ 
+   @Override
+   public boolean removeToken(String tokenIdentifier) throws TException {
+     return client.remove_token(tokenIdentifier);
+   }
+ 
+   @Override
+   public String getToken(String tokenIdentifier) throws TException {
+     return client.get_token(tokenIdentifier);
+   }
+ 
+   @Override
+   public List<String> getAllTokenIdentifiers() throws TException {
+     return client.get_all_token_identifiers();
+   }
+ 
+   @Override
+   public int addMasterKey(String key) throws MetaException, TException {
+     return client.add_master_key(key);
+   }
+ 
+   @Override
+   public void updateMasterKey(Integer seqNo, String key)
+       throws NoSuchObjectException, MetaException, TException {
+     client.update_master_key(seqNo, key);
+   }
+ 
+   @Override
+   public boolean removeMasterKey(Integer keySeq) throws TException {
+     return client.remove_master_key(keySeq);
+   }
+ 
+   @Override
+   public String[] getMasterKeys() throws TException {
+     List<String> keyList = client.get_master_keys();
+     return keyList.toArray(new String[keyList.size()]);
+   }
+ 
+   @Override
+   public ValidTxnList getValidTxns() throws TException {
+     return TxnUtils.createValidReadTxnList(client.get_open_txns(), 0);
+   }
+ 
+   @Override
+   public ValidTxnList getValidTxns(long currentTxn) throws TException {
+     return TxnUtils.createValidReadTxnList(client.get_open_txns(), currentTxn);
+   }
+ 
+   @Override
+   public ValidWriteIdList getValidWriteIds(String fullTableName) throws TException {
+     GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(Collections.singletonList(fullTableName), null);
+     GetValidWriteIdsResponse validWriteIds = client.get_valid_write_ids(rqst);
+     return TxnUtils.createValidReaderWr

<TRUNCATED>
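
The methods above wrap the underlying Thrift calls one-for-one: the
grant/revoke paths go through GrantRevokePrivilegeRequest/Response
envelopes whose success field is checked before returning, and the
delegation-token methods short-circuit to no-ops when the client runs
against a local (embedded) metastore. A minimal usage sketch for the
token API, assuming "msc" is an already-connected
org.apache.hadoop.hive.metastore.IMetaStoreClient (the variable name,
owner, and renewer principal are illustrative; only the method
signatures come from the code above):

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.thrift.TException;

    static void tokenRoundTrip(IMetaStoreClient msc) throws TException {
      // Request a token owned by "hiveuser", renewable by the given principal.
      String token = msc.getDelegationToken("hiveuser", "hive/renewer@EXAMPLE.COM");
      long newExpiry = msc.renewDelegationToken(token); // returns 0 on a local metastore
      msc.cancelDelegationToken(token);                 // no-op on a local metastore
    }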

[25/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
new file mode 100644
index 0000000..9688297
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
@@ -0,0 +1,750 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetOpenTxnsResponse implements org.apache.thrift.TBase<GetOpenTxnsResponse, GetOpenTxnsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetOpenTxnsResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenTxnsResponse");
+
+  private static final org.apache.thrift.protocol.TField TXN_HIGH_WATER_MARK_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_high_water_mark", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField OPEN_TXNS_FIELD_DESC = new org.apache.thrift.protocol.TField("open_txns", org.apache.thrift.protocol.TType.LIST, (short)2);
+  private static final org.apache.thrift.protocol.TField MIN_OPEN_TXN_FIELD_DESC = new org.apache.thrift.protocol.TField("min_open_txn", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField ABORTED_BITS_FIELD_DESC = new org.apache.thrift.protocol.TField("abortedBits", org.apache.thrift.protocol.TType.STRING, (short)4);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetOpenTxnsResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetOpenTxnsResponseTupleSchemeFactory());
+  }
+
+  private long txn_high_water_mark; // required
+  private List<Long> open_txns; // required
+  private long min_open_txn; // optional
+  private ByteBuffer abortedBits; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TXN_HIGH_WATER_MARK((short)1, "txn_high_water_mark"),
+    OPEN_TXNS((short)2, "open_txns"),
+    MIN_OPEN_TXN((short)3, "min_open_txn"),
+    ABORTED_BITS((short)4, "abortedBits");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TXN_HIGH_WATER_MARK
+          return TXN_HIGH_WATER_MARK;
+        case 2: // OPEN_TXNS
+          return OPEN_TXNS;
+        case 3: // MIN_OPEN_TXN
+          return MIN_OPEN_TXN;
+        case 4: // ABORTED_BITS
+          return ABORTED_BITS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TXN_HIGH_WATER_MARK_ISSET_ID = 0;
+  private static final int __MIN_OPEN_TXN_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.MIN_OPEN_TXN};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TXN_HIGH_WATER_MARK, new org.apache.thrift.meta_data.FieldMetaData("txn_high_water_mark", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.OPEN_TXNS, new org.apache.thrift.meta_data.FieldMetaData("open_txns", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    tmpMap.put(_Fields.MIN_OPEN_TXN, new org.apache.thrift.meta_data.FieldMetaData("min_open_txn", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.ABORTED_BITS, new org.apache.thrift.meta_data.FieldMetaData("abortedBits", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOpenTxnsResponse.class, metaDataMap);
+  }
+
+  public GetOpenTxnsResponse() {
+  }
+
+  public GetOpenTxnsResponse(
+    long txn_high_water_mark,
+    List<Long> open_txns,
+    ByteBuffer abortedBits)
+  {
+    this();
+    this.txn_high_water_mark = txn_high_water_mark;
+    setTxn_high_water_markIsSet(true);
+    this.open_txns = open_txns;
+    this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(abortedBits);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetOpenTxnsResponse(GetOpenTxnsResponse other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.txn_high_water_mark = other.txn_high_water_mark;
+    if (other.isSetOpen_txns()) {
+      List<Long> __this__open_txns = new ArrayList<Long>(other.open_txns);
+      this.open_txns = __this__open_txns;
+    }
+    this.min_open_txn = other.min_open_txn;
+    if (other.isSetAbortedBits()) {
+      this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(other.abortedBits);
+    }
+  }
+
+  public GetOpenTxnsResponse deepCopy() {
+    return new GetOpenTxnsResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    setTxn_high_water_markIsSet(false);
+    this.txn_high_water_mark = 0;
+    this.open_txns = null;
+    setMin_open_txnIsSet(false);
+    this.min_open_txn = 0;
+    this.abortedBits = null;
+  }
+
+  public long getTxn_high_water_mark() {
+    return this.txn_high_water_mark;
+  }
+
+  public void setTxn_high_water_mark(long txn_high_water_mark) {
+    this.txn_high_water_mark = txn_high_water_mark;
+    setTxn_high_water_markIsSet(true);
+  }
+
+  public void unsetTxn_high_water_mark() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXN_HIGH_WATER_MARK_ISSET_ID);
+  }
+
+  /** Returns true if field txn_high_water_mark is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxn_high_water_mark() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXN_HIGH_WATER_MARK_ISSET_ID);
+  }
+
+  public void setTxn_high_water_markIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXN_HIGH_WATER_MARK_ISSET_ID, value);
+  }
+
+  public int getOpen_txnsSize() {
+    return (this.open_txns == null) ? 0 : this.open_txns.size();
+  }
+
+  public java.util.Iterator<Long> getOpen_txnsIterator() {
+    return (this.open_txns == null) ? null : this.open_txns.iterator();
+  }
+
+  public void addToOpen_txns(long elem) {
+    if (this.open_txns == null) {
+      this.open_txns = new ArrayList<Long>();
+    }
+    this.open_txns.add(elem);
+  }
+
+  public List<Long> getOpen_txns() {
+    return this.open_txns;
+  }
+
+  public void setOpen_txns(List<Long> open_txns) {
+    this.open_txns = open_txns;
+  }
+
+  public void unsetOpen_txns() {
+    this.open_txns = null;
+  }
+
+  /** Returns true if field open_txns is set (has been assigned a value) and false otherwise */
+  public boolean isSetOpen_txns() {
+    return this.open_txns != null;
+  }
+
+  public void setOpen_txnsIsSet(boolean value) {
+    if (!value) {
+      this.open_txns = null;
+    }
+  }
+
+  public long getMin_open_txn() {
+    return this.min_open_txn;
+  }
+
+  public void setMin_open_txn(long min_open_txn) {
+    this.min_open_txn = min_open_txn;
+    setMin_open_txnIsSet(true);
+  }
+
+  public void unsetMin_open_txn() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MIN_OPEN_TXN_ISSET_ID);
+  }
+
+  /** Returns true if field min_open_txn is set (has been assigned a value) and false otherwise */
+  public boolean isSetMin_open_txn() {
+    return EncodingUtils.testBit(__isset_bitfield, __MIN_OPEN_TXN_ISSET_ID);
+  }
+
+  public void setMin_open_txnIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MIN_OPEN_TXN_ISSET_ID, value);
+  }
+
+  public byte[] getAbortedBits() {
+    setAbortedBits(org.apache.thrift.TBaseHelper.rightSize(abortedBits));
+    return abortedBits == null ? null : abortedBits.array();
+  }
+
+  public ByteBuffer bufferForAbortedBits() {
+    return org.apache.thrift.TBaseHelper.copyBinary(abortedBits);
+  }
+
+  public void setAbortedBits(byte[] abortedBits) {
+    this.abortedBits = abortedBits == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(abortedBits, abortedBits.length));
+  }
+
+  public void setAbortedBits(ByteBuffer abortedBits) {
+    this.abortedBits = org.apache.thrift.TBaseHelper.copyBinary(abortedBits);
+  }
+
+  public void unsetAbortedBits() {
+    this.abortedBits = null;
+  }
+
+  /** Returns true if field abortedBits is set (has been assigned a value) and false otherwise */
+  public boolean isSetAbortedBits() {
+    return this.abortedBits != null;
+  }
+
+  public void setAbortedBitsIsSet(boolean value) {
+    if (!value) {
+      this.abortedBits = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TXN_HIGH_WATER_MARK:
+      if (value == null) {
+        unsetTxn_high_water_mark();
+      } else {
+        setTxn_high_water_mark((Long)value);
+      }
+      break;
+
+    case OPEN_TXNS:
+      if (value == null) {
+        unsetOpen_txns();
+      } else {
+        setOpen_txns((List<Long>)value);
+      }
+      break;
+
+    case MIN_OPEN_TXN:
+      if (value == null) {
+        unsetMin_open_txn();
+      } else {
+        setMin_open_txn((Long)value);
+      }
+      break;
+
+    case ABORTED_BITS:
+      if (value == null) {
+        unsetAbortedBits();
+      } else {
+        setAbortedBits((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TXN_HIGH_WATER_MARK:
+      return getTxn_high_water_mark();
+
+    case OPEN_TXNS:
+      return getOpen_txns();
+
+    case MIN_OPEN_TXN:
+      return getMin_open_txn();
+
+    case ABORTED_BITS:
+      return getAbortedBits();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TXN_HIGH_WATER_MARK:
+      return isSetTxn_high_water_mark();
+    case OPEN_TXNS:
+      return isSetOpen_txns();
+    case MIN_OPEN_TXN:
+      return isSetMin_open_txn();
+    case ABORTED_BITS:
+      return isSetAbortedBits();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetOpenTxnsResponse)
+      return this.equals((GetOpenTxnsResponse)that);
+    return false;
+  }
+
+  public boolean equals(GetOpenTxnsResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_txn_high_water_mark = true;
+    boolean that_present_txn_high_water_mark = true;
+    if (this_present_txn_high_water_mark || that_present_txn_high_water_mark) {
+      if (!(this_present_txn_high_water_mark && that_present_txn_high_water_mark))
+        return false;
+      if (this.txn_high_water_mark != that.txn_high_water_mark)
+        return false;
+    }
+
+    boolean this_present_open_txns = true && this.isSetOpen_txns();
+    boolean that_present_open_txns = true && that.isSetOpen_txns();
+    if (this_present_open_txns || that_present_open_txns) {
+      if (!(this_present_open_txns && that_present_open_txns))
+        return false;
+      if (!this.open_txns.equals(that.open_txns))
+        return false;
+    }
+
+    boolean this_present_min_open_txn = true && this.isSetMin_open_txn();
+    boolean that_present_min_open_txn = true && that.isSetMin_open_txn();
+    if (this_present_min_open_txn || that_present_min_open_txn) {
+      if (!(this_present_min_open_txn && that_present_min_open_txn))
+        return false;
+      if (this.min_open_txn != that.min_open_txn)
+        return false;
+    }
+
+    boolean this_present_abortedBits = true && this.isSetAbortedBits();
+    boolean that_present_abortedBits = true && that.isSetAbortedBits();
+    if (this_present_abortedBits || that_present_abortedBits) {
+      if (!(this_present_abortedBits && that_present_abortedBits))
+        return false;
+      if (!this.abortedBits.equals(that.abortedBits))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_txn_high_water_mark = true;
+    list.add(present_txn_high_water_mark);
+    if (present_txn_high_water_mark)
+      list.add(txn_high_water_mark);
+
+    boolean present_open_txns = true && (isSetOpen_txns());
+    list.add(present_open_txns);
+    if (present_open_txns)
+      list.add(open_txns);
+
+    boolean present_min_open_txn = true && (isSetMin_open_txn());
+    list.add(present_min_open_txn);
+    if (present_min_open_txn)
+      list.add(min_open_txn);
+
+    boolean present_abortedBits = true && (isSetAbortedBits());
+    list.add(present_abortedBits);
+    if (present_abortedBits)
+      list.add(abortedBits);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetOpenTxnsResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTxn_high_water_mark()).compareTo(other.isSetTxn_high_water_mark());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxn_high_water_mark()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txn_high_water_mark, other.txn_high_water_mark);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOpen_txns()).compareTo(other.isSetOpen_txns());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOpen_txns()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.open_txns, other.open_txns);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMin_open_txn()).compareTo(other.isSetMin_open_txn());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMin_open_txn()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.min_open_txn, other.min_open_txn);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetAbortedBits()).compareTo(other.isSetAbortedBits());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetAbortedBits()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.abortedBits, other.abortedBits);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetOpenTxnsResponse(");
+    boolean first = true;
+
+    sb.append("txn_high_water_mark:");
+    sb.append(this.txn_high_water_mark);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("open_txns:");
+    if (this.open_txns == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.open_txns);
+    }
+    first = false;
+    if (isSetMin_open_txn()) {
+      if (!first) sb.append(", ");
+      sb.append("min_open_txn:");
+      sb.append(this.min_open_txn);
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("abortedBits:");
+    if (this.abortedBits == null) {
+      sb.append("null");
+    } else {
+      org.apache.thrift.TBaseHelper.toString(this.abortedBits, sb);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTxn_high_water_mark()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'txn_high_water_mark' is unset! Struct:" + toString());
+    }
+
+    if (!isSetOpen_txns()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'open_txns' is unset! Struct:" + toString());
+    }
+
+    if (!isSetAbortedBits()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'abortedBits' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetOpenTxnsResponseStandardSchemeFactory implements SchemeFactory {
+    public GetOpenTxnsResponseStandardScheme getScheme() {
+      return new GetOpenTxnsResponseStandardScheme();
+    }
+  }
+
+  private static class GetOpenTxnsResponseStandardScheme extends StandardScheme<GetOpenTxnsResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TXN_HIGH_WATER_MARK
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txn_high_water_mark = iprot.readI64();
+              struct.setTxn_high_water_markIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // OPEN_TXNS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list562 = iprot.readListBegin();
+                struct.open_txns = new ArrayList<Long>(_list562.size);
+                long _elem563;
+                for (int _i564 = 0; _i564 < _list562.size; ++_i564)
+                {
+                  _elem563 = iprot.readI64();
+                  struct.open_txns.add(_elem563);
+                }
+                iprot.readListEnd();
+              }
+              struct.setOpen_txnsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // MIN_OPEN_TXN
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.min_open_txn = iprot.readI64();
+              struct.setMin_open_txnIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // ABORTED_BITS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.abortedBits = iprot.readBinary();
+              struct.setAbortedBitsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(TXN_HIGH_WATER_MARK_FIELD_DESC);
+      oprot.writeI64(struct.txn_high_water_mark);
+      oprot.writeFieldEnd();
+      if (struct.open_txns != null) {
+        oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.open_txns.size()));
+          for (long _iter565 : struct.open_txns)
+          {
+            oprot.writeI64(_iter565);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetMin_open_txn()) {
+        oprot.writeFieldBegin(MIN_OPEN_TXN_FIELD_DESC);
+        oprot.writeI64(struct.min_open_txn);
+        oprot.writeFieldEnd();
+      }
+      if (struct.abortedBits != null) {
+        oprot.writeFieldBegin(ABORTED_BITS_FIELD_DESC);
+        oprot.writeBinary(struct.abortedBits);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetOpenTxnsResponseTupleSchemeFactory implements SchemeFactory {
+    public GetOpenTxnsResponseTupleScheme getScheme() {
+      return new GetOpenTxnsResponseTupleScheme();
+    }
+  }
+
+  private static class GetOpenTxnsResponseTupleScheme extends TupleScheme<GetOpenTxnsResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.txn_high_water_mark);
+      {
+        oprot.writeI32(struct.open_txns.size());
+        for (long _iter566 : struct.open_txns)
+        {
+          oprot.writeI64(_iter566);
+        }
+      }
+      oprot.writeBinary(struct.abortedBits);
+      BitSet optionals = new BitSet();
+      if (struct.isSetMin_open_txn()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMin_open_txn()) {
+        oprot.writeI64(struct.min_open_txn);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.txn_high_water_mark = iprot.readI64();
+      struct.setTxn_high_water_markIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list567 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.open_txns = new ArrayList<Long>(_list567.size);
+        long _elem568;
+        for (int _i569 = 0; _i569 < _list567.size; ++_i569)
+        {
+          _elem568 = iprot.readI64();
+          struct.open_txns.add(_elem568);
+        }
+      }
+      struct.setOpen_txnsIsSet(true);
+      struct.abortedBits = iprot.readBinary();
+      struct.setAbortedBitsIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.min_open_txn = iprot.readI64();
+        struct.setMin_open_txnIsSet(true);
+      }
+    }
+  }
+
+}
+

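GetOpenTxnsResponse carries the transaction snapshot used to build a
ValidTxnList (see the getValidTxns() client methods earlier in this
series): the transaction high-water mark, the list of open transaction
ids, an optional minimum open transaction id, and a binary bitset
marking which entries in open_txns are aborted. A short sketch of
constructing and validating one, using only the constructor, setters,
and validate() defined above (the field values are illustrative):

    import java.nio.ByteBuffer;
    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
    import org.apache.thrift.TException;

    static GetOpenTxnsResponse snapshot() throws TException {
      GetOpenTxnsResponse resp = new GetOpenTxnsResponse(
          42L,                              // txn_high_water_mark (required)
          Arrays.asList(7L, 9L, 13L),       // open_txns (required)
          ByteBuffer.wrap(new byte[]{0}));  // abortedBits (required): none aborted
      resp.setMin_open_txn(7L);             // min_open_txn is optional
      resp.validate();                      // throws if a required field is unset
      return resp;
    }
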
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java
new file mode 100644
index 0000000..cdb1db8
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleRequest.java
@@ -0,0 +1,389 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetPrincipalsInRoleRequest implements org.apache.thrift.TBase<GetPrincipalsInRoleRequest, GetPrincipalsInRoleRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetPrincipalsInRoleRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrincipalsInRoleRequest");
+
+  private static final org.apache.thrift.protocol.TField ROLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("roleName", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetPrincipalsInRoleRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetPrincipalsInRoleRequestTupleSchemeFactory());
+  }
+
+  private String roleName; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    ROLE_NAME((short)1, "roleName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // ROLE_NAME
+          return ROLE_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.ROLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("roleName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPrincipalsInRoleRequest.class, metaDataMap);
+  }
+
+  public GetPrincipalsInRoleRequest() {
+  }
+
+  public GetPrincipalsInRoleRequest(
+    String roleName)
+  {
+    this();
+    this.roleName = roleName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetPrincipalsInRoleRequest(GetPrincipalsInRoleRequest other) {
+    if (other.isSetRoleName()) {
+      this.roleName = other.roleName;
+    }
+  }
+
+  public GetPrincipalsInRoleRequest deepCopy() {
+    return new GetPrincipalsInRoleRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.roleName = null;
+  }
+
+  public String getRoleName() {
+    return this.roleName;
+  }
+
+  public void setRoleName(String roleName) {
+    this.roleName = roleName;
+  }
+
+  public void unsetRoleName() {
+    this.roleName = null;
+  }
+
+  /** Returns true if field roleName is set (has been assigned a value) and false otherwise */
+  public boolean isSetRoleName() {
+    return this.roleName != null;
+  }
+
+  public void setRoleNameIsSet(boolean value) {
+    if (!value) {
+      this.roleName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case ROLE_NAME:
+      if (value == null) {
+        unsetRoleName();
+      } else {
+        setRoleName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case ROLE_NAME:
+      return getRoleName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case ROLE_NAME:
+      return isSetRoleName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetPrincipalsInRoleRequest)
+      return this.equals((GetPrincipalsInRoleRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetPrincipalsInRoleRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_roleName = true && this.isSetRoleName();
+    boolean that_present_roleName = true && that.isSetRoleName();
+    if (this_present_roleName || that_present_roleName) {
+      if (!(this_present_roleName && that_present_roleName))
+        return false;
+      if (!this.roleName.equals(that.roleName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_roleName = true && (isSetRoleName());
+    list.add(present_roleName);
+    if (present_roleName)
+      list.add(roleName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetPrincipalsInRoleRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetRoleName()).compareTo(other.isSetRoleName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRoleName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.roleName, other.roleName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetPrincipalsInRoleRequest(");
+    boolean first = true;
+
+    sb.append("roleName:");
+    if (this.roleName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.roleName);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetRoleName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'roleName' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetPrincipalsInRoleRequestStandardSchemeFactory implements SchemeFactory {
+    public GetPrincipalsInRoleRequestStandardScheme getScheme() {
+      return new GetPrincipalsInRoleRequestStandardScheme();
+    }
+  }
+
+  private static class GetPrincipalsInRoleRequestStandardScheme extends StandardScheme<GetPrincipalsInRoleRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetPrincipalsInRoleRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // ROLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.roleName = iprot.readString();
+              struct.setRoleNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetPrincipalsInRoleRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.roleName != null) {
+        oprot.writeFieldBegin(ROLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.roleName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetPrincipalsInRoleRequestTupleSchemeFactory implements SchemeFactory {
+    public GetPrincipalsInRoleRequestTupleScheme getScheme() {
+      return new GetPrincipalsInRoleRequestTupleScheme();
+    }
+  }
+
+  private static class GetPrincipalsInRoleRequestTupleScheme extends TupleScheme<GetPrincipalsInRoleRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetPrincipalsInRoleRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.roleName);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetPrincipalsInRoleRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.roleName = iprot.readString();
+      struct.setRoleNameIsSet(true);
+    }
+  }
+
+}
+

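GetPrincipalsInRoleRequest is the simplest struct in this batch: a
single required roleName string. Since the generated read()/write()
methods accept any TProtocol, a struct can be round-tripped through an
in-memory transport; a sketch using the standard Thrift 0.9.3 classes
(buffer size and role name are illustrative):

    import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    static void roundTrip() throws TException {
      TMemoryBuffer buf = new TMemoryBuffer(64);        // in-memory transport
      TBinaryProtocol proto = new TBinaryProtocol(buf);
      new GetPrincipalsInRoleRequest("admins").write(proto);  // serialize
      GetPrincipalsInRoleRequest copy = new GetPrincipalsInRoleRequest();
      copy.read(proto);                                 // deserialize
      assert "admins".equals(copy.getRoleName());
    }
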
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
new file mode 100644
index 0000000..f116fd9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetPrincipalsInRoleResponse implements org.apache.thrift.TBase<GetPrincipalsInRoleResponse, GetPrincipalsInRoleResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetPrincipalsInRoleResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrincipalsInRoleResponse");
+
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_GRANTS_FIELD_DESC = new org.apache.thrift.protocol.TField("principalGrants", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetPrincipalsInRoleResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetPrincipalsInRoleResponseTupleSchemeFactory());
+  }
+
+  private List<RolePrincipalGrant> principalGrants; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PRINCIPAL_GRANTS((short)1, "principalGrants");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PRINCIPAL_GRANTS
+          return PRINCIPAL_GRANTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PRINCIPAL_GRANTS, new org.apache.thrift.meta_data.FieldMetaData("principalGrants", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, RolePrincipalGrant.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPrincipalsInRoleResponse.class, metaDataMap);
+  }
+
+  public GetPrincipalsInRoleResponse() {
+  }
+
+  public GetPrincipalsInRoleResponse(
+    List<RolePrincipalGrant> principalGrants)
+  {
+    this();
+    this.principalGrants = principalGrants;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetPrincipalsInRoleResponse(GetPrincipalsInRoleResponse other) {
+    if (other.isSetPrincipalGrants()) {
+      List<RolePrincipalGrant> __this__principalGrants = new ArrayList<RolePrincipalGrant>(other.principalGrants.size());
+      for (RolePrincipalGrant other_element : other.principalGrants) {
+        __this__principalGrants.add(new RolePrincipalGrant(other_element));
+      }
+      this.principalGrants = __this__principalGrants;
+    }
+  }
+
+  public GetPrincipalsInRoleResponse deepCopy() {
+    return new GetPrincipalsInRoleResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.principalGrants = null;
+  }
+
+  public int getPrincipalGrantsSize() {
+    return (this.principalGrants == null) ? 0 : this.principalGrants.size();
+  }
+
+  public java.util.Iterator<RolePrincipalGrant> getPrincipalGrantsIterator() {
+    return (this.principalGrants == null) ? null : this.principalGrants.iterator();
+  }
+
+  public void addToPrincipalGrants(RolePrincipalGrant elem) {
+    if (this.principalGrants == null) {
+      this.principalGrants = new ArrayList<RolePrincipalGrant>();
+    }
+    this.principalGrants.add(elem);
+  }
+
+  public List<RolePrincipalGrant> getPrincipalGrants() {
+    return this.principalGrants;
+  }
+
+  public void setPrincipalGrants(List<RolePrincipalGrant> principalGrants) {
+    this.principalGrants = principalGrants;
+  }
+
+  public void unsetPrincipalGrants() {
+    this.principalGrants = null;
+  }
+
+  /** Returns true if field principalGrants is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipalGrants() {
+    return this.principalGrants != null;
+  }
+
+  public void setPrincipalGrantsIsSet(boolean value) {
+    if (!value) {
+      this.principalGrants = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PRINCIPAL_GRANTS:
+      if (value == null) {
+        unsetPrincipalGrants();
+      } else {
+        setPrincipalGrants((List<RolePrincipalGrant>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PRINCIPAL_GRANTS:
+      return getPrincipalGrants();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PRINCIPAL_GRANTS:
+      return isSetPrincipalGrants();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetPrincipalsInRoleResponse)
+      return this.equals((GetPrincipalsInRoleResponse)that);
+    return false;
+  }
+
+  public boolean equals(GetPrincipalsInRoleResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_principalGrants = true && this.isSetPrincipalGrants();
+    boolean that_present_principalGrants = true && that.isSetPrincipalGrants();
+    if (this_present_principalGrants || that_present_principalGrants) {
+      if (!(this_present_principalGrants && that_present_principalGrants))
+        return false;
+      if (!this.principalGrants.equals(that.principalGrants))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_principalGrants = true && (isSetPrincipalGrants());
+    list.add(present_principalGrants);
+    if (present_principalGrants)
+      list.add(principalGrants);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetPrincipalsInRoleResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPrincipalGrants()).compareTo(other.isSetPrincipalGrants());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipalGrants()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principalGrants, other.principalGrants);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetPrincipalsInRoleResponse(");
+    boolean first = true;
+
+    sb.append("principalGrants:");
+    if (this.principalGrants == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principalGrants);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetPrincipalGrants()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'principalGrants' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetPrincipalsInRoleResponseStandardSchemeFactory implements SchemeFactory {
+    public GetPrincipalsInRoleResponseStandardScheme getScheme() {
+      return new GetPrincipalsInRoleResponseStandardScheme();
+    }
+  }
+
+  private static class GetPrincipalsInRoleResponseStandardScheme extends StandardScheme<GetPrincipalsInRoleResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetPrincipalsInRoleResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PRINCIPAL_GRANTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list86 = iprot.readListBegin();
+                struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list86.size);
+                RolePrincipalGrant _elem87;
+                for (int _i88 = 0; _i88 < _list86.size; ++_i88)
+                {
+                  _elem87 = new RolePrincipalGrant();
+                  _elem87.read(iprot);
+                  struct.principalGrants.add(_elem87);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPrincipalGrantsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetPrincipalsInRoleResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.principalGrants != null) {
+        oprot.writeFieldBegin(PRINCIPAL_GRANTS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.principalGrants.size()));
+          for (RolePrincipalGrant _iter89 : struct.principalGrants)
+          {
+            _iter89.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetPrincipalsInRoleResponseTupleSchemeFactory implements SchemeFactory {
+    public GetPrincipalsInRoleResponseTupleScheme getScheme() {
+      return new GetPrincipalsInRoleResponseTupleScheme();
+    }
+  }
+
+  private static class GetPrincipalsInRoleResponseTupleScheme extends TupleScheme<GetPrincipalsInRoleResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetPrincipalsInRoleResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.principalGrants.size());
+        for (RolePrincipalGrant _iter90 : struct.principalGrants)
+        {
+          _iter90.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetPrincipalsInRoleResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list91 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list91.size);
+        RolePrincipalGrant _elem92;
+        for (int _i93 = 0; _i93 < _list91.size; ++_i93)
+        {
+          _elem92 = new RolePrincipalGrant();
+          _elem92.read(iprot);
+          struct.principalGrants.add(_elem92);
+        }
+      }
+      struct.setPrincipalGrantsIsSet(true);
+    }
+  }
+
+}
+
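
The struct above is the Thrift response carrying role-membership grants. A
minimal consumption sketch, assuming an already-connected IMetaStoreClient (the
client wiring is hypothetical and omitted) and using the getters the Thrift
compiler generates for these structs:

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest;
    import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse;
    import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
    import org.apache.thrift.TException;

    public class PrincipalsInRoleExample {
      // Lists every principal that currently holds the given role.
      static void printPrincipals(IMetaStoreClient client, String role) throws TException {
        GetPrincipalsInRoleResponse resp =
            client.get_principals_in_role(new GetPrincipalsInRoleRequest(role));
        for (RolePrincipalGrant grant : resp.getPrincipalGrants()) {
          System.out.println(grant.getPrincipalName() + " (" + grant.getPrincipalType() + ")");
        }
      }
    }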

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java
new file mode 100644
index 0000000..a740ab9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalRequest.java
@@ -0,0 +1,502 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetRoleGrantsForPrincipalRequest implements org.apache.thrift.TBase<GetRoleGrantsForPrincipalRequest, GetRoleGrantsForPrincipalRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetRoleGrantsForPrincipalRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetRoleGrantsForPrincipalRequest");
+
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("principal_name", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("principal_type", org.apache.thrift.protocol.TType.I32, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetRoleGrantsForPrincipalRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetRoleGrantsForPrincipalRequestTupleSchemeFactory());
+  }
+
+  private String principal_name; // required
+  private PrincipalType principal_type; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PRINCIPAL_NAME((short)1, "principal_name"),
+    /**
+     * 
+     * @see PrincipalType
+     */
+    PRINCIPAL_TYPE((short)2, "principal_type");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PRINCIPAL_NAME
+          return PRINCIPAL_NAME;
+        case 2: // PRINCIPAL_TYPE
+          return PRINCIPAL_TYPE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PRINCIPAL_NAME, new org.apache.thrift.meta_data.FieldMetaData("principal_name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PRINCIPAL_TYPE, new org.apache.thrift.meta_data.FieldMetaData("principal_type", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetRoleGrantsForPrincipalRequest.class, metaDataMap);
+  }
+
+  public GetRoleGrantsForPrincipalRequest() {
+  }
+
+  public GetRoleGrantsForPrincipalRequest(
+    String principal_name,
+    PrincipalType principal_type)
+  {
+    this();
+    this.principal_name = principal_name;
+    this.principal_type = principal_type;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetRoleGrantsForPrincipalRequest(GetRoleGrantsForPrincipalRequest other) {
+    if (other.isSetPrincipal_name()) {
+      this.principal_name = other.principal_name;
+    }
+    if (other.isSetPrincipal_type()) {
+      this.principal_type = other.principal_type;
+    }
+  }
+
+  public GetRoleGrantsForPrincipalRequest deepCopy() {
+    return new GetRoleGrantsForPrincipalRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.principal_name = null;
+    this.principal_type = null;
+  }
+
+  public String getPrincipal_name() {
+    return this.principal_name;
+  }
+
+  public void setPrincipal_name(String principal_name) {
+    this.principal_name = principal_name;
+  }
+
+  public void unsetPrincipal_name() {
+    this.principal_name = null;
+  }
+
+  /** Returns true if field principal_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipal_name() {
+    return this.principal_name != null;
+  }
+
+  public void setPrincipal_nameIsSet(boolean value) {
+    if (!value) {
+      this.principal_name = null;
+    }
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public PrincipalType getPrincipal_type() {
+    return this.principal_type;
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public void setPrincipal_type(PrincipalType principal_type) {
+    this.principal_type = principal_type;
+  }
+
+  public void unsetPrincipal_type() {
+    this.principal_type = null;
+  }
+
+  /** Returns true if field principal_type is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipal_type() {
+    return this.principal_type != null;
+  }
+
+  public void setPrincipal_typeIsSet(boolean value) {
+    if (!value) {
+      this.principal_type = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PRINCIPAL_NAME:
+      if (value == null) {
+        unsetPrincipal_name();
+      } else {
+        setPrincipal_name((String)value);
+      }
+      break;
+
+    case PRINCIPAL_TYPE:
+      if (value == null) {
+        unsetPrincipal_type();
+      } else {
+        setPrincipal_type((PrincipalType)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PRINCIPAL_NAME:
+      return getPrincipal_name();
+
+    case PRINCIPAL_TYPE:
+      return getPrincipal_type();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PRINCIPAL_NAME:
+      return isSetPrincipal_name();
+    case PRINCIPAL_TYPE:
+      return isSetPrincipal_type();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetRoleGrantsForPrincipalRequest)
+      return this.equals((GetRoleGrantsForPrincipalRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetRoleGrantsForPrincipalRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_principal_name = true && this.isSetPrincipal_name();
+    boolean that_present_principal_name = true && that.isSetPrincipal_name();
+    if (this_present_principal_name || that_present_principal_name) {
+      if (!(this_present_principal_name && that_present_principal_name))
+        return false;
+      if (!this.principal_name.equals(that.principal_name))
+        return false;
+    }
+
+    boolean this_present_principal_type = true && this.isSetPrincipal_type();
+    boolean that_present_principal_type = true && that.isSetPrincipal_type();
+    if (this_present_principal_type || that_present_principal_type) {
+      if (!(this_present_principal_type && that_present_principal_type))
+        return false;
+      if (!this.principal_type.equals(that.principal_type))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_principal_name = true && (isSetPrincipal_name());
+    list.add(present_principal_name);
+    if (present_principal_name)
+      list.add(principal_name);
+
+    boolean present_principal_type = true && (isSetPrincipal_type());
+    list.add(present_principal_type);
+    if (present_principal_type)
+      list.add(principal_type.getValue());
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetRoleGrantsForPrincipalRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPrincipal_name()).compareTo(other.isSetPrincipal_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipal_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principal_name, other.principal_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrincipal_type()).compareTo(other.isSetPrincipal_type());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipal_type()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principal_type, other.principal_type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetRoleGrantsForPrincipalRequest(");
+    boolean first = true;
+
+    sb.append("principal_name:");
+    if (this.principal_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principal_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("principal_type:");
+    if (this.principal_type == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principal_type);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetPrincipal_name()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'principal_name' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPrincipal_type()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'principal_type' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetRoleGrantsForPrincipalRequestStandardSchemeFactory implements SchemeFactory {
+    public GetRoleGrantsForPrincipalRequestStandardScheme getScheme() {
+      return new GetRoleGrantsForPrincipalRequestStandardScheme();
+    }
+  }
+
+  private static class GetRoleGrantsForPrincipalRequestStandardScheme extends StandardScheme<GetRoleGrantsForPrincipalRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetRoleGrantsForPrincipalRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PRINCIPAL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.principal_name = iprot.readString();
+              struct.setPrincipal_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PRINCIPAL_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.principal_type = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+              struct.setPrincipal_typeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetRoleGrantsForPrincipalRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.principal_name != null) {
+        oprot.writeFieldBegin(PRINCIPAL_NAME_FIELD_DESC);
+        oprot.writeString(struct.principal_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.principal_type != null) {
+        oprot.writeFieldBegin(PRINCIPAL_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.principal_type.getValue());
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetRoleGrantsForPrincipalRequestTupleSchemeFactory implements SchemeFactory {
+    public GetRoleGrantsForPrincipalRequestTupleScheme getScheme() {
+      return new GetRoleGrantsForPrincipalRequestTupleScheme();
+    }
+  }
+
+  private static class GetRoleGrantsForPrincipalRequestTupleScheme extends TupleScheme<GetRoleGrantsForPrincipalRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetRoleGrantsForPrincipalRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.principal_name);
+      oprot.writeI32(struct.principal_type.getValue());
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetRoleGrantsForPrincipalRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.principal_name = iprot.readString();
+      struct.setPrincipal_nameIsSet(true);
+      struct.principal_type = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+      struct.setPrincipal_typeIsSet(true);
+    }
+  }
+
+}
+
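
The request struct is the companion lookup in the other direction: both of its
fields are required, so the two-argument constructor generated above is the
natural entry point. A minimal sketch, again assuming a connected
IMetaStoreClient (hypothetical wiring omitted):

    import org.apache.hadoop.hive.metastore.IMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest;
    import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse;
    import org.apache.hadoop.hive.metastore.api.PrincipalType;
    import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
    import org.apache.thrift.TException;

    public class RoleGrantsExample {
      // Lists every role granted to the given user principal.
      static void printRoles(IMetaStoreClient client, String user) throws TException {
        GetRoleGrantsForPrincipalRequest req =
            new GetRoleGrantsForPrincipalRequest(user, PrincipalType.USER);
        GetRoleGrantsForPrincipalResponse resp = client.get_role_grants_for_principal(req);
        for (RolePrincipalGrant grant : resp.getPrincipalGrants()) {
          System.out.println(grant.getRoleName());
        }
      }
    }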


[83/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
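
The hunks below use git's combined-diff (diff --cc) notation for merge commits:
each line carries one marker column per parent, so lines prefixed "++" were
introduced by the merge result itself, while "+ " and " -" show how the result
differs from each individual parent (here the first parent lacked the file
entirely).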
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
index 0000000,a663a64..c9b70a4
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java
@@@ -1,0 -1,750 +1,961 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class TableStatsRequest implements org.apache.thrift.TBase<TableStatsRequest, TableStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<TableStatsRequest> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsRequest");
+ 
+   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+   private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+   private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3);
+   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
++  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
++  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new TableStatsRequestStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new TableStatsRequestTupleSchemeFactory());
+   }
+ 
+   private String dbName; // required
+   private String tblName; // required
+   private List<String> colNames; // required
+   private String catName; // optional
++  private long txnId; // optional
++  private String validWriteIdList; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     DB_NAME((short)1, "dbName"),
+     TBL_NAME((short)2, "tblName"),
+     COL_NAMES((short)3, "colNames"),
 -    CAT_NAME((short)4, "catName");
++    CAT_NAME((short)4, "catName"),
++    TXN_ID((short)5, "txnId"),
++    VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if its not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // DB_NAME
+           return DB_NAME;
+         case 2: // TBL_NAME
+           return TBL_NAME;
+         case 3: // COL_NAMES
+           return COL_NAMES;
+         case 4: // CAT_NAME
+           return CAT_NAME;
++        case 5: // TXN_ID
++          return TXN_ID;
++        case 6: // VALID_WRITE_ID_LIST
++          return VALID_WRITE_ID_LIST;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if its not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
 -  private static final _Fields optionals[] = {_Fields.CAT_NAME};
++  private static final int __TXNID_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.COL_NAMES, new org.apache.thrift.meta_data.FieldMetaData("colNames", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsRequest.class, metaDataMap);
+   }
+ 
+   public TableStatsRequest() {
++    this.txnId = -1L;
++
+   }
+ 
+   public TableStatsRequest(
+     String dbName,
+     String tblName,
+     List<String> colNames)
+   {
+     this();
+     this.dbName = dbName;
+     this.tblName = tblName;
+     this.colNames = colNames;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public TableStatsRequest(TableStatsRequest other) {
++    __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetDbName()) {
+       this.dbName = other.dbName;
+     }
+     if (other.isSetTblName()) {
+       this.tblName = other.tblName;
+     }
+     if (other.isSetColNames()) {
+       List<String> __this__colNames = new ArrayList<String>(other.colNames);
+       this.colNames = __this__colNames;
+     }
+     if (other.isSetCatName()) {
+       this.catName = other.catName;
+     }
++    this.txnId = other.txnId;
++    if (other.isSetValidWriteIdList()) {
++      this.validWriteIdList = other.validWriteIdList;
++    }
+   }
+ 
+   public TableStatsRequest deepCopy() {
+     return new TableStatsRequest(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.dbName = null;
+     this.tblName = null;
+     this.colNames = null;
+     this.catName = null;
++    this.txnId = -1L;
++
++    this.validWriteIdList = null;
+   }
+ 
+   public String getDbName() {
+     return this.dbName;
+   }
+ 
+   public void setDbName(String dbName) {
+     this.dbName = dbName;
+   }
+ 
+   public void unsetDbName() {
+     this.dbName = null;
+   }
+ 
+   /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+   public boolean isSetDbName() {
+     return this.dbName != null;
+   }
+ 
+   public void setDbNameIsSet(boolean value) {
+     if (!value) {
+       this.dbName = null;
+     }
+   }
+ 
+   public String getTblName() {
+     return this.tblName;
+   }
+ 
+   public void setTblName(String tblName) {
+     this.tblName = tblName;
+   }
+ 
+   public void unsetTblName() {
+     this.tblName = null;
+   }
+ 
+   /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+   public boolean isSetTblName() {
+     return this.tblName != null;
+   }
+ 
+   public void setTblNameIsSet(boolean value) {
+     if (!value) {
+       this.tblName = null;
+     }
+   }
+ 
+   public int getColNamesSize() {
+     return (this.colNames == null) ? 0 : this.colNames.size();
+   }
+ 
+   public java.util.Iterator<String> getColNamesIterator() {
+     return (this.colNames == null) ? null : this.colNames.iterator();
+   }
+ 
+   public void addToColNames(String elem) {
+     if (this.colNames == null) {
+       this.colNames = new ArrayList<String>();
+     }
+     this.colNames.add(elem);
+   }
+ 
+   public List<String> getColNames() {
+     return this.colNames;
+   }
+ 
+   public void setColNames(List<String> colNames) {
+     this.colNames = colNames;
+   }
+ 
+   public void unsetColNames() {
+     this.colNames = null;
+   }
+ 
+   /** Returns true if field colNames is set (has been assigned a value) and false otherwise */
+   public boolean isSetColNames() {
+     return this.colNames != null;
+   }
+ 
+   public void setColNamesIsSet(boolean value) {
+     if (!value) {
+       this.colNames = null;
+     }
+   }
+ 
+   public String getCatName() {
+     return this.catName;
+   }
+ 
+   public void setCatName(String catName) {
+     this.catName = catName;
+   }
+ 
+   public void unsetCatName() {
+     this.catName = null;
+   }
+ 
+   /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+   public boolean isSetCatName() {
+     return this.catName != null;
+   }
+ 
+   public void setCatNameIsSet(boolean value) {
+     if (!value) {
+       this.catName = null;
+     }
+   }
+ 
++  public long getTxnId() {
++    return this.txnId;
++  }
++
++  public void setTxnId(long txnId) {
++    this.txnId = txnId;
++    setTxnIdIsSet(true);
++  }
++
++  public void unsetTxnId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
++  public boolean isSetTxnId() {
++    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  public void setTxnIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
++  }
++
++  public String getValidWriteIdList() {
++    return this.validWriteIdList;
++  }
++
++  public void setValidWriteIdList(String validWriteIdList) {
++    this.validWriteIdList = validWriteIdList;
++  }
++
++  public void unsetValidWriteIdList() {
++    this.validWriteIdList = null;
++  }
++
++  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
++  public boolean isSetValidWriteIdList() {
++    return this.validWriteIdList != null;
++  }
++
++  public void setValidWriteIdListIsSet(boolean value) {
++    if (!value) {
++      this.validWriteIdList = null;
++    }
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case DB_NAME:
+       if (value == null) {
+         unsetDbName();
+       } else {
+         setDbName((String)value);
+       }
+       break;
+ 
+     case TBL_NAME:
+       if (value == null) {
+         unsetTblName();
+       } else {
+         setTblName((String)value);
+       }
+       break;
+ 
+     case COL_NAMES:
+       if (value == null) {
+         unsetColNames();
+       } else {
+         setColNames((List<String>)value);
+       }
+       break;
+ 
+     case CAT_NAME:
+       if (value == null) {
+         unsetCatName();
+       } else {
+         setCatName((String)value);
+       }
+       break;
+ 
++    case TXN_ID:
++      if (value == null) {
++        unsetTxnId();
++      } else {
++        setTxnId((Long)value);
++      }
++      break;
++
++    case VALID_WRITE_ID_LIST:
++      if (value == null) {
++        unsetValidWriteIdList();
++      } else {
++        setValidWriteIdList((String)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case DB_NAME:
+       return getDbName();
+ 
+     case TBL_NAME:
+       return getTblName();
+ 
+     case COL_NAMES:
+       return getColNames();
+ 
+     case CAT_NAME:
+       return getCatName();
+ 
++    case TXN_ID:
++      return getTxnId();
++
++    case VALID_WRITE_ID_LIST:
++      return getValidWriteIdList();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case DB_NAME:
+       return isSetDbName();
+     case TBL_NAME:
+       return isSetTblName();
+     case COL_NAMES:
+       return isSetColNames();
+     case CAT_NAME:
+       return isSetCatName();
++    case TXN_ID:
++      return isSetTxnId();
++    case VALID_WRITE_ID_LIST:
++      return isSetValidWriteIdList();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof TableStatsRequest)
+       return this.equals((TableStatsRequest)that);
+     return false;
+   }
+ 
+   public boolean equals(TableStatsRequest that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_dbName = true && this.isSetDbName();
+     boolean that_present_dbName = true && that.isSetDbName();
+     if (this_present_dbName || that_present_dbName) {
+       if (!(this_present_dbName && that_present_dbName))
+         return false;
+       if (!this.dbName.equals(that.dbName))
+         return false;
+     }
+ 
+     boolean this_present_tblName = true && this.isSetTblName();
+     boolean that_present_tblName = true && that.isSetTblName();
+     if (this_present_tblName || that_present_tblName) {
+       if (!(this_present_tblName && that_present_tblName))
+         return false;
+       if (!this.tblName.equals(that.tblName))
+         return false;
+     }
+ 
+     boolean this_present_colNames = true && this.isSetColNames();
+     boolean that_present_colNames = true && that.isSetColNames();
+     if (this_present_colNames || that_present_colNames) {
+       if (!(this_present_colNames && that_present_colNames))
+         return false;
+       if (!this.colNames.equals(that.colNames))
+         return false;
+     }
+ 
+     boolean this_present_catName = true && this.isSetCatName();
+     boolean that_present_catName = true && that.isSetCatName();
+     if (this_present_catName || that_present_catName) {
+       if (!(this_present_catName && that_present_catName))
+         return false;
+       if (!this.catName.equals(that.catName))
+         return false;
+     }
+ 
++    boolean this_present_txnId = true && this.isSetTxnId();
++    boolean that_present_txnId = true && that.isSetTxnId();
++    if (this_present_txnId || that_present_txnId) {
++      if (!(this_present_txnId && that_present_txnId))
++        return false;
++      if (this.txnId != that.txnId)
++        return false;
++    }
++
++    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
++    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
++    if (this_present_validWriteIdList || that_present_validWriteIdList) {
++      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
++        return false;
++      if (!this.validWriteIdList.equals(that.validWriteIdList))
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_dbName = true && (isSetDbName());
+     list.add(present_dbName);
+     if (present_dbName)
+       list.add(dbName);
+ 
+     boolean present_tblName = true && (isSetTblName());
+     list.add(present_tblName);
+     if (present_tblName)
+       list.add(tblName);
+ 
+     boolean present_colNames = true && (isSetColNames());
+     list.add(present_colNames);
+     if (present_colNames)
+       list.add(colNames);
+ 
+     boolean present_catName = true && (isSetCatName());
+     list.add(present_catName);
+     if (present_catName)
+       list.add(catName);
+ 
++    boolean present_txnId = true && (isSetTxnId());
++    list.add(present_txnId);
++    if (present_txnId)
++      list.add(txnId);
++
++    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
++    list.add(present_validWriteIdList);
++    if (present_validWriteIdList)
++      list.add(validWriteIdList);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(TableStatsRequest other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetDbName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTblName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetColNames()).compareTo(other.isSetColNames());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetColNames()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colNames, other.colNames);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCatName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTxnId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetValidWriteIdList()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("TableStatsRequest(");
+     boolean first = true;
+ 
+     sb.append("dbName:");
+     if (this.dbName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.dbName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("tblName:");
+     if (this.tblName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.tblName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("colNames:");
+     if (this.colNames == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.colNames);
+     }
+     first = false;
+     if (isSetCatName()) {
+       if (!first) sb.append(", ");
+       sb.append("catName:");
+       if (this.catName == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.catName);
+       }
+       first = false;
+     }
++    if (isSetTxnId()) {
++      if (!first) sb.append(", ");
++      sb.append("txnId:");
++      sb.append(this.txnId);
++      first = false;
++    }
++    if (isSetValidWriteIdList()) {
++      if (!first) sb.append(", ");
++      sb.append("validWriteIdList:");
++      if (this.validWriteIdList == null) {
++        sb.append("null");
++      } else {
++        sb.append(this.validWriteIdList);
++      }
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetDbName()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetTblName()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetColNames()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'colNames' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class TableStatsRequestStandardSchemeFactory implements SchemeFactory {
+     public TableStatsRequestStandardScheme getScheme() {
+       return new TableStatsRequestStandardScheme();
+     }
+   }
+ 
+   private static class TableStatsRequestStandardScheme extends StandardScheme<TableStatsRequest> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // DB_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.dbName = iprot.readString();
+               struct.setDbNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // TBL_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.tblName = iprot.readString();
+               struct.setTblNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 3: // COL_NAMES
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list450 = iprot.readListBegin();
+                 struct.colNames = new ArrayList<String>(_list450.size);
+                 String _elem451;
+                 for (int _i452 = 0; _i452 < _list450.size; ++_i452)
+                 {
+                   _elem451 = iprot.readString();
+                   struct.colNames.add(_elem451);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setColNamesIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 4: // CAT_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.catName = iprot.readString();
+               struct.setCatNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 5: // TXN_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.txnId = iprot.readI64();
++              struct.setTxnIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 6: // VALID_WRITE_ID_LIST
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.validWriteIdList = iprot.readString();
++              struct.setValidWriteIdListIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.dbName != null) {
+         oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+         oprot.writeString(struct.dbName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.tblName != null) {
+         oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+         oprot.writeString(struct.tblName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.colNames != null) {
+         oprot.writeFieldBegin(COL_NAMES_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size()));
+           for (String _iter453 : struct.colNames)
+           {
+             oprot.writeString(_iter453);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       if (struct.catName != null) {
+         if (struct.isSetCatName()) {
+           oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+           oprot.writeString(struct.catName);
+           oprot.writeFieldEnd();
+         }
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
++        oprot.writeI64(struct.txnId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.validWriteIdList != null) {
++        if (struct.isSetValidWriteIdList()) {
++          oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
++          oprot.writeString(struct.validWriteIdList);
++          oprot.writeFieldEnd();
++        }
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class TableStatsRequestTupleSchemeFactory implements SchemeFactory {
+     public TableStatsRequestTupleScheme getScheme() {
+       return new TableStatsRequestTupleScheme();
+     }
+   }
+ 
+   private static class TableStatsRequestTupleScheme extends TupleScheme<TableStatsRequest> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       oprot.writeString(struct.dbName);
+       oprot.writeString(struct.tblName);
+       {
+         oprot.writeI32(struct.colNames.size());
+         for (String _iter454 : struct.colNames)
+         {
+           oprot.writeString(_iter454);
+         }
+       }
+       BitSet optionals = new BitSet();
+       if (struct.isSetCatName()) {
+         optionals.set(0);
+       }
 -      oprot.writeBitSet(optionals, 1);
++      if (struct.isSetTxnId()) {
++        optionals.set(1);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        optionals.set(2);
++      }
++      oprot.writeBitSet(optionals, 3);
+       if (struct.isSetCatName()) {
+         oprot.writeString(struct.catName);
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeI64(struct.txnId);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        oprot.writeString(struct.validWriteIdList);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       struct.dbName = iprot.readString();
+       struct.setDbNameIsSet(true);
+       struct.tblName = iprot.readString();
+       struct.setTblNameIsSet(true);
+       {
+         org.apache.thrift.protocol.TList _list455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.colNames = new ArrayList<String>(_list455.size);
+         String _elem456;
+         for (int _i457 = 0; _i457 < _list455.size; ++_i457)
+         {
+           _elem456 = iprot.readString();
+           struct.colNames.add(_elem456);
+         }
+       }
+       struct.setColNamesIsSet(true);
 -      BitSet incoming = iprot.readBitSet(1);
++      BitSet incoming = iprot.readBitSet(3);
+       if (incoming.get(0)) {
+         struct.catName = iprot.readString();
+         struct.setCatNameIsSet(true);
+       }
++      if (incoming.get(1)) {
++        struct.txnId = iprot.readI64();
++        struct.setTxnIdIsSet(true);
++      }
++      if (incoming.get(2)) {
++        struct.validWriteIdList = iprot.readString();
++        struct.setValidWriteIdListIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
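
The merge retrofits two optional fields, txnId and validWriteIdList, onto
TableStatsRequest; because both are optional and txnId is seeded with -1L in
the default constructor, existing call sites and the required-field validation
are unchanged. A short sketch of populating the struct, using only the
constructor and setters generated above (the table, column, and write-id-list
values are illustrative):

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.TableStatsRequest;

    public class TableStatsRequestExample {
      public static void main(String[] args) throws Exception {
        TableStatsRequest req =
            new TableStatsRequest("default", "sales", Arrays.asList("id", "amount"));
        System.out.println(req.isSetTxnId()); // false: -1L is a sentinel, not "set"
        req.setTxnId(42L);                    // also flips the isset bit
        req.setValidWriteIdList("default.sales:42:9223372036854775807::"); // illustrative
        req.validate();                       // only dbName/tblName/colNames are required
        System.out.println(req);
      }
    }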

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
index 0000000,dff7d5c..0685a22
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java
@@@ -1,0 -1,443 +1,550 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class TableStatsResult implements org.apache.thrift.TBase<TableStatsResult, TableStatsResult._Fields>, java.io.Serializable, Cloneable, Comparable<TableStatsResult> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TableStatsResult");
+ 
+   private static final org.apache.thrift.protocol.TField TABLE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("tableStats", org.apache.thrift.protocol.TType.LIST, (short)1);
++  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new TableStatsResultStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new TableStatsResultTupleSchemeFactory());
+   }
+ 
+   private List<ColumnStatisticsObj> tableStats; // required
++  private boolean isStatsCompliant; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
 -    TABLE_STATS((short)1, "tableStats");
++    TABLE_STATS((short)1, "tableStats"),
++    IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // TABLE_STATS
+           return TABLE_STATS;
++        case 2: // IS_STATS_COMPLIANT
++          return IS_STATS_COMPLIANT;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
++  private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.TABLE_STATS, new org.apache.thrift.meta_data.FieldMetaData("tableStats", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
++    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TableStatsResult.class, metaDataMap);
+   }
+ 
+   public TableStatsResult() {
+   }
+ 
+   public TableStatsResult(
+     List<ColumnStatisticsObj> tableStats)
+   {
+     this();
+     this.tableStats = tableStats;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public TableStatsResult(TableStatsResult other) {
++    __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetTableStats()) {
+       List<ColumnStatisticsObj> __this__tableStats = new ArrayList<ColumnStatisticsObj>(other.tableStats.size());
+       for (ColumnStatisticsObj other_element : other.tableStats) {
+         __this__tableStats.add(new ColumnStatisticsObj(other_element));
+       }
+       this.tableStats = __this__tableStats;
+     }
++    this.isStatsCompliant = other.isStatsCompliant;
+   }
+ 
+   public TableStatsResult deepCopy() {
+     return new TableStatsResult(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.tableStats = null;
++    setIsStatsCompliantIsSet(false);
++    this.isStatsCompliant = false;
+   }
+ 
+   public int getTableStatsSize() {
+     return (this.tableStats == null) ? 0 : this.tableStats.size();
+   }
+ 
+   public java.util.Iterator<ColumnStatisticsObj> getTableStatsIterator() {
+     return (this.tableStats == null) ? null : this.tableStats.iterator();
+   }
+ 
+   public void addToTableStats(ColumnStatisticsObj elem) {
+     if (this.tableStats == null) {
+       this.tableStats = new ArrayList<ColumnStatisticsObj>();
+     }
+     this.tableStats.add(elem);
+   }
+ 
+   public List<ColumnStatisticsObj> getTableStats() {
+     return this.tableStats;
+   }
+ 
+   public void setTableStats(List<ColumnStatisticsObj> tableStats) {
+     this.tableStats = tableStats;
+   }
+ 
+   public void unsetTableStats() {
+     this.tableStats = null;
+   }
+ 
+   /** Returns true if field tableStats is set (has been assigned a value) and false otherwise */
+   public boolean isSetTableStats() {
+     return this.tableStats != null;
+   }
+ 
+   public void setTableStatsIsSet(boolean value) {
+     if (!value) {
+       this.tableStats = null;
+     }
+   }
+ 
++  public boolean isIsStatsCompliant() {
++    return this.isStatsCompliant;
++  }
++
++  public void setIsStatsCompliant(boolean isStatsCompliant) {
++    this.isStatsCompliant = isStatsCompliant;
++    setIsStatsCompliantIsSet(true);
++  }
++
++  public void unsetIsStatsCompliant() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
++  public boolean isSetIsStatsCompliant() {
++    return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  public void setIsStatsCompliantIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case TABLE_STATS:
+       if (value == null) {
+         unsetTableStats();
+       } else {
+         setTableStats((List<ColumnStatisticsObj>)value);
+       }
+       break;
+ 
++    case IS_STATS_COMPLIANT:
++      if (value == null) {
++        unsetIsStatsCompliant();
++      } else {
++        setIsStatsCompliant((Boolean)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case TABLE_STATS:
+       return getTableStats();
+ 
++    case IS_STATS_COMPLIANT:
++      return isIsStatsCompliant();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case TABLE_STATS:
+       return isSetTableStats();
++    case IS_STATS_COMPLIANT:
++      return isSetIsStatsCompliant();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof TableStatsResult)
+       return this.equals((TableStatsResult)that);
+     return false;
+   }
+ 
+   public boolean equals(TableStatsResult that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_tableStats = true && this.isSetTableStats();
+     boolean that_present_tableStats = true && that.isSetTableStats();
+     if (this_present_tableStats || that_present_tableStats) {
+       if (!(this_present_tableStats && that_present_tableStats))
+         return false;
+       if (!this.tableStats.equals(that.tableStats))
+         return false;
+     }
+ 
++    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
++    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
++    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
++      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
++        return false;
++      if (this.isStatsCompliant != that.isStatsCompliant)
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_tableStats = true && (isSetTableStats());
+     list.add(present_tableStats);
+     if (present_tableStats)
+       list.add(tableStats);
+ 
++    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
++    list.add(present_isStatsCompliant);
++    if (present_isStatsCompliant)
++      list.add(isStatsCompliant);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(TableStatsResult other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetTableStats()).compareTo(other.isSetTableStats());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTableStats()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableStats, other.tableStats);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIsStatsCompliant()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("TableStatsResult(");
+     boolean first = true;
+ 
+     sb.append("tableStats:");
+     if (this.tableStats == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.tableStats);
+     }
+     first = false;
++    if (isSetIsStatsCompliant()) {
++      if (!first) sb.append(", ");
++      sb.append("isStatsCompliant:");
++      sb.append(this.isStatsCompliant);
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetTableStats()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableStats' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class TableStatsResultStandardSchemeFactory implements SchemeFactory {
+     public TableStatsResultStandardScheme getScheme() {
+       return new TableStatsResultStandardScheme();
+     }
+   }
+ 
+   private static class TableStatsResultStandardScheme extends StandardScheme<TableStatsResult> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // TABLE_STATS
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list424 = iprot.readListBegin();
+                 struct.tableStats = new ArrayList<ColumnStatisticsObj>(_list424.size);
+                 ColumnStatisticsObj _elem425;
+                 for (int _i426 = 0; _i426 < _list424.size; ++_i426)
+                 {
+                   _elem425 = new ColumnStatisticsObj();
+                   _elem425.read(iprot);
+                   struct.tableStats.add(_elem425);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setTableStatsIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 2: // IS_STATS_COMPLIANT
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.isStatsCompliant = iprot.readBool();
++              struct.setIsStatsCompliantIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsResult struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.tableStats != null) {
+         oprot.writeFieldBegin(TABLE_STATS_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tableStats.size()));
+           for (ColumnStatisticsObj _iter427 : struct.tableStats)
+           {
+             _iter427.write(oprot);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
++        oprot.writeBool(struct.isStatsCompliant);
++        oprot.writeFieldEnd();
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class TableStatsResultTupleSchemeFactory implements SchemeFactory {
+     public TableStatsResultTupleScheme getScheme() {
+       return new TableStatsResultTupleScheme();
+     }
+   }
+ 
+   private static class TableStatsResultTupleScheme extends TupleScheme<TableStatsResult> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       {
+         oprot.writeI32(struct.tableStats.size());
+         for (ColumnStatisticsObj _iter428 : struct.tableStats)
+         {
+           _iter428.write(oprot);
+         }
+       }
++      BitSet optionals = new BitSet();
++      if (struct.isSetIsStatsCompliant()) {
++        optionals.set(0);
++      }
++      oprot.writeBitSet(optionals, 1);
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeBool(struct.isStatsCompliant);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsResult struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       {
+         org.apache.thrift.protocol.TList _list429 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+         struct.tableStats = new ArrayList<ColumnStatisticsObj>(_list429.size);
+         ColumnStatisticsObj _elem430;
+         for (int _i431 = 0; _i431 < _list429.size; ++_i431)
+         {
+           _elem430 = new ColumnStatisticsObj();
+           _elem430.read(iprot);
+           struct.tableStats.add(_elem430);
+         }
+       }
+       struct.setTableStatsIsSet(true);
++      BitSet incoming = iprot.readBitSet(1);
++      if (incoming.get(0)) {
++        struct.isStatsCompliant = iprot.readBool();
++        struct.setIsStatsCompliantIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 

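For illustration only (not part of the commit): a minimal round-trip sketch of the new optional field, assuming libthrift 0.9.3 and the generated TableStatsResult above on the classpath; the wrapper class name is hypothetical. The guarded write (isSetIsStatsCompliant()) plus the __isset_bitfield are what keep an unset optional boolean distinct from an explicit false on the wire.

import java.util.ArrayList;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
import org.apache.hadoop.hive.metastore.api.TableStatsResult;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class TableStatsResultRoundTrip {
  public static void main(String[] args) throws Exception {
    // tableStats is REQUIRED, so validate() insists it be set (empty is fine).
    TableStatsResult out = new TableStatsResult(new ArrayList<ColumnStatisticsObj>());
    out.setIsStatsCompliant(true);  // also flips bit __ISSTATSCOMPLIANT_ISSET_ID

    TMemoryBuffer buf = new TMemoryBuffer(1024);
    out.write(new TCompactProtocol(buf));  // field 2 is written only because it is set

    TableStatsResult in = new TableStatsResult();
    in.read(new TCompactProtocol(buf));

    // Prints "true true": the reader restored both the value and the isset bit.
    System.out.println(in.isSetIsStatsCompliant() + " " + in.isIsStatsCompliant());
  }
}

Had setIsStatsCompliant(true) been skipped, the writer would have omitted field 2 entirely and the reader would report isSetIsStatsCompliant() == false, which is exactly the distinction the new bitfield bookkeeping preserves.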

[48/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
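
The C++ header below declares the full ThriftHiveMetastoreIf pure-virtual service interface plus a ThriftHiveMetastoreNull base whose no-op bodies let implementers override only the calls they care about. For illustration only (not part of the commit), a sketch of invoking one of these operations through the Java binding generated from the same IDL; the class name, host, port 9083, and an unsecured metastore are all assumptions:

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class MetastoreSmokeTest {
  public static void main(String[] args) throws Exception {
    TSocket transport = new TSocket("localhost", 9083);  // assumed metastore endpoint
    transport.open();
    try {
      ThriftHiveMetastore.Client client =
          new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
      // Java counterpart of: virtual void get_all_databases(std::vector<std::string>&) = 0;
      System.out.println(client.get_all_databases());
    } finally {
      transport.close();
    }
  }
}
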
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
new file mode 100644
index 0000000..352f5c7
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h
@@ -0,0 +1,30068 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#ifndef ThriftHiveMetastore_H
+#define ThriftHiveMetastore_H
+
+#include <thrift/TDispatchProcessor.h>
+#include <thrift/async/TConcurrentClientSyncInfo.h>
+#include "hive_metastore_types.h"
+#include "FacebookService.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+#ifdef _WIN32
+  #pragma warning( push )
+  #pragma warning (disable : 4250 ) //inheriting methods via dominance 
+#endif
+
+class ThriftHiveMetastoreIf : virtual public  ::facebook::fb303::FacebookServiceIf {
+ public:
+  virtual ~ThriftHiveMetastoreIf() {}
+  virtual void getMetaConf(std::string& _return, const std::string& key) = 0;
+  virtual void setMetaConf(const std::string& key, const std::string& value) = 0;
+  virtual void create_catalog(const CreateCatalogRequest& catalog) = 0;
+  virtual void alter_catalog(const AlterCatalogRequest& rqst) = 0;
+  virtual void get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName) = 0;
+  virtual void get_catalogs(GetCatalogsResponse& _return) = 0;
+  virtual void drop_catalog(const DropCatalogRequest& catName) = 0;
+  virtual void create_database(const Database& database) = 0;
+  virtual void get_database(Database& _return, const std::string& name) = 0;
+  virtual void drop_database(const std::string& name, const bool deleteData, const bool cascade) = 0;
+  virtual void get_databases(std::vector<std::string> & _return, const std::string& pattern) = 0;
+  virtual void get_all_databases(std::vector<std::string> & _return) = 0;
+  virtual void alter_database(const std::string& dbname, const Database& db) = 0;
+  virtual void get_type(Type& _return, const std::string& name) = 0;
+  virtual bool create_type(const Type& type) = 0;
+  virtual bool drop_type(const std::string& type) = 0;
+  virtual void get_type_all(std::map<std::string, Type> & _return, const std::string& name) = 0;
+  virtual void get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name) = 0;
+  virtual void get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) = 0;
+  virtual void get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name) = 0;
+  virtual void get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) = 0;
+  virtual void create_table(const Table& tbl) = 0;
+  virtual void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) = 0;
+  virtual void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints, const std::vector<SQLDefaultConstraint> & defaultConstraints, const std::vector<SQLCheckConstraint> & checkConstraints) = 0;
+  virtual void drop_constraint(const DropConstraintRequest& req) = 0;
+  virtual void add_primary_key(const AddPrimaryKeyRequest& req) = 0;
+  virtual void add_foreign_key(const AddForeignKeyRequest& req) = 0;
+  virtual void add_unique_constraint(const AddUniqueConstraintRequest& req) = 0;
+  virtual void add_not_null_constraint(const AddNotNullConstraintRequest& req) = 0;
+  virtual void add_default_constraint(const AddDefaultConstraintRequest& req) = 0;
+  virtual void add_check_constraint(const AddCheckConstraintRequest& req) = 0;
+  virtual void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) = 0;
+  virtual void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) = 0;
+  virtual void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames) = 0;
+  virtual void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern) = 0;
+  virtual void get_tables_by_type(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) = 0;
+  virtual void get_materialized_views_for_rewriting(std::vector<std::string> & _return, const std::string& db_name) = 0;
+  virtual void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types) = 0;
+  virtual void get_all_tables(std::vector<std::string> & _return, const std::string& db_name) = 0;
+  virtual void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) = 0;
+  virtual void get_table_objects_by_name(std::vector<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) = 0;
+  virtual void get_table_req(GetTableResult& _return, const GetTableRequest& req) = 0;
+  virtual void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) = 0;
+  virtual void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) = 0;
+  virtual void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) = 0;
+  virtual void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) = 0;
+  virtual void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) = 0;
+  virtual void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) = 0;
+  virtual void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) = 0;
+  virtual void add_partition(Partition& _return, const Partition& new_part) = 0;
+  virtual void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) = 0;
+  virtual int32_t add_partitions(const std::vector<Partition> & new_parts) = 0;
+  virtual int32_t add_partitions_pspec(const std::vector<PartitionSpec> & new_parts) = 0;
+  virtual void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) = 0;
+  virtual void add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) = 0;
+  virtual void append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context) = 0;
+  virtual void append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) = 0;
+  virtual void append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) = 0;
+  virtual bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData) = 0;
+  virtual bool drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context) = 0;
+  virtual bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) = 0;
+  virtual bool drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) = 0;
+  virtual void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) = 0;
+  virtual void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) = 0;
+  virtual void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) = 0;
+  virtual void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) = 0;
+  virtual void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) = 0;
+  virtual void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) = 0;
+  virtual void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0;
+  virtual void get_partitions_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names) = 0;
+  virtual void get_partitions_pspec(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) = 0;
+  virtual void get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) = 0;
+  virtual void get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request) = 0;
+  virtual void get_partitions_ps(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts) = 0;
+  virtual void get_partitions_ps_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names) = 0;
+  virtual void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts) = 0;
+  virtual void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) = 0;
+  virtual void get_part_specs_by_filter(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) = 0;
+  virtual void get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req) = 0;
+  virtual int32_t get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) = 0;
+  virtual void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names) = 0;
+  virtual void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) = 0;
+  virtual void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts) = 0;
+  virtual void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) = 0;
+  virtual void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) = 0;
+  virtual void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part) = 0;
+  virtual bool partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception) = 0;
+  virtual void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) = 0;
+  virtual void partition_name_to_vals(std::vector<std::string> & _return, const std::string& part_name) = 0;
+  virtual void partition_name_to_spec(std::map<std::string, std::string> & _return, const std::string& part_name) = 0;
+  virtual void markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map<std::string, std::string> & part_vals, const PartitionEventType::type eventType) = 0;
+  virtual bool isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map<std::string, std::string> & part_vals, const PartitionEventType::type eventType) = 0;
+  virtual void get_primary_keys(PrimaryKeysResponse& _return, const PrimaryKeysRequest& request) = 0;
+  virtual void get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) = 0;
+  virtual void get_unique_constraints(UniqueConstraintsResponse& _return, const UniqueConstraintsRequest& request) = 0;
+  virtual void get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request) = 0;
+  virtual void get_default_constraints(DefaultConstraintsResponse& _return, const DefaultConstraintsRequest& request) = 0;
+  virtual void get_check_constraints(CheckConstraintsResponse& _return, const CheckConstraintsRequest& request) = 0;
+  virtual bool update_table_column_statistics(const ColumnStatistics& stats_obj) = 0;
+  virtual bool update_partition_column_statistics(const ColumnStatistics& stats_obj) = 0;
+  virtual void get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) = 0;
+  virtual void get_partition_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) = 0;
+  virtual void get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) = 0;
+  virtual void get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) = 0;
+  virtual void get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) = 0;
+  virtual bool set_aggr_stats_for(const SetPartitionsStatsRequest& request) = 0;
+  virtual bool delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) = 0;
+  virtual bool delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) = 0;
+  virtual void create_function(const Function& func) = 0;
+  virtual void drop_function(const std::string& dbName, const std::string& funcName) = 0;
+  virtual void alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) = 0;
+  virtual void get_functions(std::vector<std::string> & _return, const std::string& dbName, const std::string& pattern) = 0;
+  virtual void get_function(Function& _return, const std::string& dbName, const std::string& funcName) = 0;
+  virtual void get_all_functions(GetAllFunctionsResponse& _return) = 0;
+  virtual bool create_role(const Role& role) = 0;
+  virtual bool drop_role(const std::string& role_name) = 0;
+  virtual void get_role_names(std::vector<std::string> & _return) = 0;
+  virtual bool grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) = 0;
+  virtual bool revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) = 0;
+  virtual void list_roles(std::vector<Role> & _return, const std::string& principal_name, const PrincipalType::type principal_type) = 0;
+  virtual void grant_revoke_role(GrantRevokeRoleResponse& _return, const GrantRevokeRoleRequest& request) = 0;
+  virtual void get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) = 0;
+  virtual void get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) = 0;
+  virtual void get_privilege_set(PrincipalPrivilegeSet& _return, const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector<std::string> & group_names) = 0;
+  virtual void list_privileges(std::vector<HiveObjectPrivilege> & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) = 0;
+  virtual bool grant_privileges(const PrivilegeBag& privileges) = 0;
+  virtual bool revoke_privileges(const PrivilegeBag& privileges) = 0;
+  virtual void grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) = 0;
+  virtual void refresh_privileges(GrantRevokePrivilegeResponse& _return, const HiveObjectRef& objToRefresh, const std::string& authorizer, const GrantRevokePrivilegeRequest& grantRequest) = 0;
+  virtual void set_ugi(std::vector<std::string> & _return, const std::string& user_name, const std::vector<std::string> & group_names) = 0;
+  virtual void get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) = 0;
+  virtual int64_t renew_delegation_token(const std::string& token_str_form) = 0;
+  virtual void cancel_delegation_token(const std::string& token_str_form) = 0;
+  virtual bool add_token(const std::string& token_identifier, const std::string& delegation_token) = 0;
+  virtual bool remove_token(const std::string& token_identifier) = 0;
+  virtual void get_token(std::string& _return, const std::string& token_identifier) = 0;
+  virtual void get_all_token_identifiers(std::vector<std::string> & _return) = 0;
+  virtual int32_t add_master_key(const std::string& key) = 0;
+  virtual void update_master_key(const int32_t seq_number, const std::string& key) = 0;
+  virtual bool remove_master_key(const int32_t key_seq) = 0;
+  virtual void get_master_keys(std::vector<std::string> & _return) = 0;
+  virtual void get_open_txns(GetOpenTxnsResponse& _return) = 0;
+  virtual void get_open_txns_info(GetOpenTxnsInfoResponse& _return) = 0;
+  virtual void open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) = 0;
+  virtual void abort_txn(const AbortTxnRequest& rqst) = 0;
+  virtual void abort_txns(const AbortTxnsRequest& rqst) = 0;
+  virtual void commit_txn(const CommitTxnRequest& rqst) = 0;
+  virtual void repl_tbl_writeid_state(const ReplTblWriteIdStateRequest& rqst) = 0;
+  virtual void get_valid_write_ids(GetValidWriteIdsResponse& _return, const GetValidWriteIdsRequest& rqst) = 0;
+  virtual void allocate_table_write_ids(AllocateTableWriteIdsResponse& _return, const AllocateTableWriteIdsRequest& rqst) = 0;
+  virtual void lock(LockResponse& _return, const LockRequest& rqst) = 0;
+  virtual void check_lock(LockResponse& _return, const CheckLockRequest& rqst) = 0;
+  virtual void unlock(const UnlockRequest& rqst) = 0;
+  virtual void show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) = 0;
+  virtual void heartbeat(const HeartbeatRequest& ids) = 0;
+  virtual void heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) = 0;
+  virtual void compact(const CompactionRequest& rqst) = 0;
+  virtual void compact2(CompactionResponse& _return, const CompactionRequest& rqst) = 0;
+  virtual void show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) = 0;
+  virtual void add_dynamic_partitions(const AddDynamicPartitions& rqst) = 0;
+  virtual void get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) = 0;
+  virtual void get_current_notificationEventId(CurrentNotificationEventId& _return) = 0;
+  virtual void get_notification_events_count(NotificationEventsCountResponse& _return, const NotificationEventsCountRequest& rqst) = 0;
+  virtual void fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) = 0;
+  virtual void flushCache() = 0;
+  virtual void add_write_notification_log(WriteNotificationLogResponse& _return, const WriteNotificationLogRequest& rqst) = 0;
+  virtual void cm_recycle(CmRecycleResponse& _return, const CmRecycleRequest& request) = 0;
+  virtual void get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) = 0;
+  virtual void get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) = 0;
+  virtual void put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) = 0;
+  virtual void clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) = 0;
+  virtual void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) = 0;
+  virtual void get_metastore_db_uuid(std::string& _return) = 0;
+  virtual void create_resource_plan(WMCreateResourcePlanResponse& _return, const WMCreateResourcePlanRequest& request) = 0;
+  virtual void get_resource_plan(WMGetResourcePlanResponse& _return, const WMGetResourcePlanRequest& request) = 0;
+  virtual void get_active_resource_plan(WMGetActiveResourcePlanResponse& _return, const WMGetActiveResourcePlanRequest& request) = 0;
+  virtual void get_all_resource_plans(WMGetAllResourcePlanResponse& _return, const WMGetAllResourcePlanRequest& request) = 0;
+  virtual void alter_resource_plan(WMAlterResourcePlanResponse& _return, const WMAlterResourcePlanRequest& request) = 0;
+  virtual void validate_resource_plan(WMValidateResourcePlanResponse& _return, const WMValidateResourcePlanRequest& request) = 0;
+  virtual void drop_resource_plan(WMDropResourcePlanResponse& _return, const WMDropResourcePlanRequest& request) = 0;
+  virtual void create_wm_trigger(WMCreateTriggerResponse& _return, const WMCreateTriggerRequest& request) = 0;
+  virtual void alter_wm_trigger(WMAlterTriggerResponse& _return, const WMAlterTriggerRequest& request) = 0;
+  virtual void drop_wm_trigger(WMDropTriggerResponse& _return, const WMDropTriggerRequest& request) = 0;
+  virtual void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request) = 0;
+  virtual void create_wm_pool(WMCreatePoolResponse& _return, const WMCreatePoolRequest& request) = 0;
+  virtual void alter_wm_pool(WMAlterPoolResponse& _return, const WMAlterPoolRequest& request) = 0;
+  virtual void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) = 0;
+  virtual void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) = 0;
+  virtual void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) = 0;
+  virtual void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) = 0;
+  virtual void create_ischema(const ISchema& schema) = 0;
+  virtual void alter_ischema(const AlterISchemaRequest& rqst) = 0;
+  virtual void get_ischema(ISchema& _return, const ISchemaName& name) = 0;
+  virtual void drop_ischema(const ISchemaName& name) = 0;
+  virtual void add_schema_version(const SchemaVersion& schemaVersion) = 0;
+  virtual void get_schema_version(SchemaVersion& _return, const SchemaVersionDescriptor& schemaVersion) = 0;
+  virtual void get_schema_latest_version(SchemaVersion& _return, const ISchemaName& schemaName) = 0;
+  virtual void get_schema_all_versions(std::vector<SchemaVersion> & _return, const ISchemaName& schemaName) = 0;
+  virtual void drop_schema_version(const SchemaVersionDescriptor& schemaVersion) = 0;
+  virtual void get_schemas_by_cols(FindSchemasByColsResp& _return, const FindSchemasByColsRqst& rqst) = 0;
+  virtual void map_schema_version_to_serde(const MapSchemaVersionToSerdeRequest& rqst) = 0;
+  virtual void set_schema_version_state(const SetSchemaVersionStateRequest& rqst) = 0;
+  virtual void add_serde(const SerDeInfo& serde) = 0;
+  virtual void get_serde(SerDeInfo& _return, const GetSerdeRequest& rqst) = 0;
+  virtual void get_lock_materialization_rebuild(LockResponse& _return, const std::string& dbName, const std::string& tableName, const int64_t txnId) = 0;
+  virtual bool heartbeat_lock_materialization_rebuild(const std::string& dbName, const std::string& tableName, const int64_t txnId) = 0;
+  virtual void add_runtime_stats(const RuntimeStat& stat) = 0;
+  virtual void get_runtime_stats(std::vector<RuntimeStat> & _return, const GetRuntimeStatsRequest& rqst) = 0;
+};
+
+class ThriftHiveMetastoreIfFactory : virtual public  ::facebook::fb303::FacebookServiceIfFactory {
+ public:
+  typedef ThriftHiveMetastoreIf Handler;
+
+  virtual ~ThriftHiveMetastoreIfFactory() {}
+
+  virtual ThriftHiveMetastoreIf* getHandler(const ::apache::thrift::TConnectionInfo& connInfo) = 0;
+  virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* handler */) = 0;
+};
+
+class ThriftHiveMetastoreIfSingletonFactory : virtual public ThriftHiveMetastoreIfFactory {
+ public:
+  ThriftHiveMetastoreIfSingletonFactory(const boost::shared_ptr<ThriftHiveMetastoreIf>& iface) : iface_(iface) {}
+  virtual ~ThriftHiveMetastoreIfSingletonFactory() {}
+
+  virtual ThriftHiveMetastoreIf* getHandler(const ::apache::thrift::TConnectionInfo&) {
+    return iface_.get();
+  }
+  virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* handler */) {}
+
+ protected:
+  boost::shared_ptr<ThriftHiveMetastoreIf> iface_;
+};
+
+class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual public  ::facebook::fb303::FacebookServiceNull {
+ public:
+  virtual ~ThriftHiveMetastoreNull() {}
+  void getMetaConf(std::string& /* _return */, const std::string& /* key */) {
+    return;
+  }
+  void setMetaConf(const std::string& /* key */, const std::string& /* value */) {
+    return;
+  }
+  void create_catalog(const CreateCatalogRequest& /* catalog */) {
+    return;
+  }
+  void alter_catalog(const AlterCatalogRequest& /* rqst */) {
+    return;
+  }
+  void get_catalog(GetCatalogResponse& /* _return */, const GetCatalogRequest& /* catName */) {
+    return;
+  }
+  void get_catalogs(GetCatalogsResponse& /* _return */) {
+    return;
+  }
+  void drop_catalog(const DropCatalogRequest& /* catName */) {
+    return;
+  }
+  void create_database(const Database& /* database */) {
+    return;
+  }
+  void get_database(Database& /* _return */, const std::string& /* name */) {
+    return;
+  }
+  void drop_database(const std::string& /* name */, const bool /* deleteData */, const bool /* cascade */) {
+    return;
+  }
+  void get_databases(std::vector<std::string> & /* _return */, const std::string& /* pattern */) {
+    return;
+  }
+  void get_all_databases(std::vector<std::string> & /* _return */) {
+    return;
+  }
+  void alter_database(const std::string& /* dbname */, const Database& /* db */) {
+    return;
+  }
+  void get_type(Type& /* _return */, const std::string& /* name */) {
+    return;
+  }
+  bool create_type(const Type& /* type */) {
+    bool _return = false;
+    return _return;
+  }
+  bool drop_type(const std::string& /* type */) {
+    bool _return = false;
+    return _return;
+  }
+  void get_type_all(std::map<std::string, Type> & /* _return */, const std::string& /* name */) {
+    return;
+  }
+  void get_fields(std::vector<FieldSchema> & /* _return */, const std::string& /* db_name */, const std::string& /* table_name */) {
+    return;
+  }
+  void get_fields_with_environment_context(std::vector<FieldSchema> & /* _return */, const std::string& /* db_name */, const std::string& /* table_name */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  void get_schema(std::vector<FieldSchema> & /* _return */, const std::string& /* db_name */, const std::string& /* table_name */) {
+    return;
+  }
+  void get_schema_with_environment_context(std::vector<FieldSchema> & /* _return */, const std::string& /* db_name */, const std::string& /* table_name */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  void create_table(const Table& /* tbl */) {
+    return;
+  }
+  void create_table_with_environment_context(const Table& /* tbl */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  void create_table_with_constraints(const Table& /* tbl */, const std::vector<SQLPrimaryKey> & /* primaryKeys */, const std::vector<SQLForeignKey> & /* foreignKeys */, const std::vector<SQLUniqueConstraint> & /* uniqueConstraints */, const std::vector<SQLNotNullConstraint> & /* notNullConstraints */, const std::vector<SQLDefaultConstraint> & /* defaultConstraints */, const std::vector<SQLCheckConstraint> & /* checkConstraints */) {
+    return;
+  }
+  void drop_constraint(const DropConstraintRequest& /* req */) {
+    return;
+  }
+  void add_primary_key(const AddPrimaryKeyRequest& /* req */) {
+    return;
+  }
+  void add_foreign_key(const AddForeignKeyRequest& /* req */) {
+    return;
+  }
+  void add_unique_constraint(const AddUniqueConstraintRequest& /* req */) {
+    return;
+  }
+  void add_not_null_constraint(const AddNotNullConstraintRequest& /* req */) {
+    return;
+  }
+  void add_default_constraint(const AddDefaultConstraintRequest& /* req */) {
+    return;
+  }
+  void add_check_constraint(const AddCheckConstraintRequest& /* req */) {
+    return;
+  }
+  void drop_table(const std::string& /* dbname */, const std::string& /* name */, const bool /* deleteData */) {
+    return;
+  }
+  void drop_table_with_environment_context(const std::string& /* dbname */, const std::string& /* name */, const bool /* deleteData */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  void truncate_table(const std::string& /* dbName */, const std::string& /* tableName */, const std::vector<std::string> & /* partNames */) {
+    return;
+  }
+  void get_tables(std::vector<std::string> & /* _return */, const std::string& /* db_name */, const std::string& /* pattern */) {
+    return;
+  }
+  void get_tables_by_type(std::vector<std::string> & /* _return */, const std::string& /* db_name */, const std::string& /* pattern */, const std::string& /* tableType */) {
+    return;
+  }
+  void get_materialized_views_for_rewriting(std::vector<std::string> & /* _return */, const std::string& /* db_name */) {
+    return;
+  }
+  void get_table_meta(std::vector<TableMeta> & /* _return */, const std::string& /* db_patterns */, const std::string& /* tbl_patterns */, const std::vector<std::string> & /* tbl_types */) {
+    return;
+  }
+  void get_all_tables(std::vector<std::string> & /* _return */, const std::string& /* db_name */) {
+    return;
+  }
+  void get_table(Table& /* _return */, const std::string& /* dbname */, const std::string& /* tbl_name */) {
+    return;
+  }
+  void get_table_objects_by_name(std::vector<Table> & /* _return */, const std::string& /* dbname */, const std::vector<std::string> & /* tbl_names */) {
+    return;
+  }
+  void get_table_req(GetTableResult& /* _return */, const GetTableRequest& /* req */) {
+    return;
+  }
+  void get_table_objects_by_name_req(GetTablesResult& /* _return */, const GetTablesRequest& /* req */) {
+    return;
+  }
+  void get_materialization_invalidation_info(std::map<std::string, Materialization> & /* _return */, const std::string& /* dbname */, const std::vector<std::string> & /* tbl_names */) {
+    return;
+  }
+  void update_creation_metadata(const std::string& /* catName */, const std::string& /* dbname */, const std::string& /* tbl_name */, const CreationMetadata& /* creation_metadata */) {
+    return;
+  }
+  void get_table_names_by_filter(std::vector<std::string> & /* _return */, const std::string& /* dbname */, const std::string& /* filter */, const int16_t /* max_tables */) {
+    return;
+  }
+  void alter_table(const std::string& /* dbname */, const std::string& /* tbl_name */, const Table& /* new_tbl */) {
+    return;
+  }
+  void alter_table_with_environment_context(const std::string& /* dbname */, const std::string& /* tbl_name */, const Table& /* new_tbl */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  void alter_table_with_cascade(const std::string& /* dbname */, const std::string& /* tbl_name */, const Table& /* new_tbl */, const bool /* cascade */) {
+    return;
+  }
+  void add_partition(Partition& /* _return */, const Partition& /* new_part */) {
+    return;
+  }
+  void add_partition_with_environment_context(Partition& /* _return */, const Partition& /* new_part */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  int32_t add_partitions(const std::vector<Partition> & /* new_parts */) {
+    int32_t _return = 0;
+    return _return;
+  }
+  int32_t add_partitions_pspec(const std::vector<PartitionSpec> & /* new_parts */) {
+    int32_t _return = 0;
+    return _return;
+  }
+  void append_partition(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */) {
+    return;
+  }
+  void add_partitions_req(AddPartitionsResult& /* _return */, const AddPartitionsRequest& /* request */) {
+    return;
+  }
+  void append_partition_with_environment_context(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  void append_partition_by_name(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */) {
+    return;
+  }
+  void append_partition_by_name_with_environment_context(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  bool drop_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const bool /* deleteData */) {
+    bool _return = false;
+    return _return;
+  }
+  bool drop_partition_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const bool /* deleteData */, const EnvironmentContext& /* environment_context */) {
+    bool _return = false;
+    return _return;
+  }
+  bool drop_partition_by_name(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */, const bool /* deleteData */) {
+    bool _return = false;
+    return _return;
+  }
+  bool drop_partition_by_name_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */, const bool /* deleteData */, const EnvironmentContext& /* environment_context */) {
+    bool _return = false;
+    return _return;
+  }
+  void drop_partitions_req(DropPartitionsResult& /* _return */, const DropPartitionsRequest& /* req */) {
+    return;
+  }
+  void get_partition(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */) {
+    return;
+  }
+  void exchange_partition(Partition& /* _return */, const std::map<std::string, std::string> & /* partitionSpecs */, const std::string& /* source_db */, const std::string& /* source_table_name */, const std::string& /* dest_db */, const std::string& /* dest_table_name */) {
+    return;
+  }
+  void exchange_partitions(std::vector<Partition> & /* _return */, const std::map<std::string, std::string> & /* partitionSpecs */, const std::string& /* source_db */, const std::string& /* source_table_name */, const std::string& /* dest_db */, const std::string& /* dest_table_name */) {
+    return;
+  }
+  void get_partition_with_auth(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const std::string& /* user_name */, const std::vector<std::string> & /* group_names */) {
+    return;
+  }
+  void get_partition_by_name(Partition& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */) {
+    return;
+  }
+  void get_partitions(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const int16_t /* max_parts */) {
+    return;
+  }
+  void get_partitions_with_auth(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const int16_t /* max_parts */, const std::string& /* user_name */, const std::vector<std::string> & /* group_names */) {
+    return;
+  }
+  void get_partitions_pspec(std::vector<PartitionSpec> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const int32_t /* max_parts */) {
+    return;
+  }
+  void get_partition_names(std::vector<std::string> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const int16_t /* max_parts */) {
+    return;
+  }
+  void get_partition_values(PartitionValuesResponse& /* _return */, const PartitionValuesRequest& /* request */) {
+    return;
+  }
+  void get_partitions_ps(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const int16_t /* max_parts */) {
+    return;
+  }
+  void get_partitions_ps_with_auth(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const int16_t /* max_parts */, const std::string& /* user_name */, const std::vector<std::string> & /* group_names */) {
+    return;
+  }
+  void get_partition_names_ps(std::vector<std::string> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const int16_t /* max_parts */) {
+    return;
+  }
+  void get_partitions_by_filter(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* filter */, const int16_t /* max_parts */) {
+    return;
+  }
+  void get_part_specs_by_filter(std::vector<PartitionSpec> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* filter */, const int32_t /* max_parts */) {
+    return;
+  }
+  void get_partitions_by_expr(PartitionsByExprResult& /* _return */, const PartitionsByExprRequest& /* req */) {
+    return;
+  }
+  int32_t get_num_partitions_by_filter(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* filter */) {
+    int32_t _return = 0;
+    return _return;
+  }
+  void get_partitions_by_names(std::vector<Partition> & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* names */) {
+    return;
+  }
+  void alter_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */) {
+    return;
+  }
+  void alter_partitions(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<Partition> & /* new_parts */) {
+    return;
+  }
+  void alter_partitions_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<Partition> & /* new_parts */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  void alter_partition_with_environment_context(const std::string& /* db_name */, const std::string& /* tbl_name */, const Partition& /* new_part */, const EnvironmentContext& /* environment_context */) {
+    return;
+  }
+  void rename_partition(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::vector<std::string> & /* part_vals */, const Partition& /* new_part */) {
+    return;
+  }
+  bool partition_name_has_valid_characters(const std::vector<std::string> & /* part_vals */, const bool /* throw_exception */) {
+    bool _return = false;
+    return _return;
+  }
+  void get_config_value(std::string& /* _return */, const std::string& /* name */, const std::string& /* defaultValue */) {
+    return;
+  }
+  void partition_name_to_vals(std::vector<std::string> & /* _return */, const std::string& /* part_name */) {
+    return;
+  }
+  void partition_name_to_spec(std::map<std::string, std::string> & /* _return */, const std::string& /* part_name */) {
+    return;
+  }
+  void markPartitionForEvent(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::map<std::string, std::string> & /* part_vals */, const PartitionEventType::type /* eventType */) {
+    return;
+  }
+  bool isPartitionMarkedForEvent(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::map<std::string, std::string> & /* part_vals */, const PartitionEventType::type /* eventType */) {
+    bool _return = false;
+    return _return;
+  }
+  void get_primary_keys(PrimaryKeysResponse& /* _return */, const PrimaryKeysRequest& /* request */) {
+    return;
+  }
+  void get_foreign_keys(ForeignKeysResponse& /* _return */, const ForeignKeysRequest& /* request */) {
+    return;
+  }
+  void get_unique_constraints(UniqueConstraintsResponse& /* _return */, const UniqueConstraintsRequest& /* request */) {
+    return;
+  }
+  void get_not_null_constraints(NotNullConstraintsResponse& /* _return */, const NotNullConstraintsRequest& /* request */) {
+    return;
+  }
+  void get_default_constraints(DefaultConstraintsResponse& /* _return */, const DefaultConstraintsRequest& /* request */) {
+    return;
+  }
+  void get_check_constraints(CheckConstraintsResponse& /* _return */, const CheckConstraintsRequest& /* request */) {
+    return;
+  }
+  bool update_table_column_statistics(const ColumnStatistics& /* stats_obj */) {
+    bool _return = false;
+    return _return;
+  }
+  bool update_partition_column_statistics(const ColumnStatistics& /* stats_obj */) {
+    bool _return = false;
+    return _return;
+  }
+  void get_table_column_statistics(ColumnStatistics& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* col_name */) {
+    return;
+  }
+  void get_partition_column_statistics(ColumnStatistics& /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */, const std::string& /* col_name */) {
+    return;
+  }
+  void get_table_statistics_req(TableStatsResult& /* _return */, const TableStatsRequest& /* request */) {
+    return;
+  }
+  void get_partitions_statistics_req(PartitionsStatsResult& /* _return */, const PartitionsStatsRequest& /* request */) {
+    return;
+  }
+  void get_aggr_stats_for(AggrStats& /* _return */, const PartitionsStatsRequest& /* request */) {
+    return;
+  }
+  bool set_aggr_stats_for(const SetPartitionsStatsRequest& /* request */) {
+    bool _return = false;
+    return _return;
+  }
+  bool delete_partition_column_statistics(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */, const std::string& /* col_name */) {
+    bool _return = false;
+    return _return;
+  }
+  bool delete_table_column_statistics(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* col_name */) {
+    bool _return = false;
+    return _return;
+  }
+  void create_function(const Function& /* func */) {
+    return;
+  }
+  void drop_function(const std::string& /* dbName */, const std::string& /* funcName */) {
+    return;
+  }
+  void alter_function(const std::string& /* dbName */, const std::string& /* funcName */, const Function& /* newFunc */) {
+    return;
+  }
+  void get_functions(std::vector<std::string> & /* _return */, const std::string& /* dbName */, const std::string& /* pattern */) {
+    return;
+  }
+  void get_function(Function& /* _return */, const std::string& /* dbName */, const std::string& /* funcName */) {
+    return;
+  }
+  void get_all_functions(GetAllFunctionsResponse& /* _return */) {
+    return;
+  }
+  bool create_role(const Role& /* role */) {
+    bool _return = false;
+    return _return;
+  }
+  bool drop_role(const std::string& /* role_name */) {
+    bool _return = false;
+    return _return;
+  }
+  void get_role_names(std::vector<std::string> & /* _return */) {
+    return;
+  }
+  bool grant_role(const std::string& /* role_name */, const std::string& /* principal_name */, const PrincipalType::type /* principal_type */, const std::string& /* grantor */, const PrincipalType::type /* grantorType */, const bool /* grant_option */) {
+    bool _return = false;
+    return _return;
+  }
+  bool revoke_role(const std::string& /* role_name */, const std::string& /* principal_name */, const PrincipalType::type /* principal_type */) {
+    bool _return = false;
+    return _return;
+  }
+  void list_roles(std::vector<Role> & /* _return */, const std::string& /* principal_name */, const PrincipalType::type /* principal_type */) {
+    return;
+  }
+  void grant_revoke_role(GrantRevokeRoleResponse& /* _return */, const GrantRevokeRoleRequest& /* request */) {
+    return;
+  }
+  void get_principals_in_role(GetPrincipalsInRoleResponse& /* _return */, const GetPrincipalsInRoleRequest& /* request */) {
+    return;
+  }
+  void get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& /* _return */, const GetRoleGrantsForPrincipalRequest& /* request */) {
+    return;
+  }
+  void get_privilege_set(PrincipalPrivilegeSet& /* _return */, const HiveObjectRef& /* hiveObject */, const std::string& /* user_name */, const std::vector<std::string> & /* group_names */) {
+    return;
+  }
+  void list_privileges(std::vector<HiveObjectPrivilege> & /* _return */, const std::string& /* principal_name */, const PrincipalType::type /* principal_type */, const HiveObjectRef& /* hiveObject */) {
+    return;
+  }
+  bool grant_privileges(const PrivilegeBag& /* privileges */) {
+    bool _return = false;
+    return _return;
+  }
+  bool revoke_privileges(const PrivilegeBag& /* privileges */) {
+    bool _return = false;
+    return _return;
+  }
+  void grant_revoke_privileges(GrantRevokePrivilegeResponse& /* _return */, const GrantRevokePrivilegeRequest& /* request */) {
+    return;
+  }
+  void refresh_privileges(GrantRevokePrivilegeResponse& /* _return */, const HiveObjectRef& /* objToRefresh */, const std::string& /* authorizer */, const GrantRevokePrivilegeRequest& /* grantRequest */) {
+    return;
+  }
+  void set_ugi(std::vector<std::string> & /* _return */, const std::string& /* user_name */, const std::vector<std::string> & /* group_names */) {
+    return;
+  }
+  void get_delegation_token(std::string& /* _return */, const std::string& /* token_owner */, const std::string& /* renewer_kerberos_principal_name */) {
+    return;
+  }
+  int64_t renew_delegation_token(const std::string& /* token_str_form */) {
+    int64_t _return = 0;
+    return _return;
+  }
+  void cancel_delegation_token(const std::string& /* token_str_form */) {
+    return;
+  }
+  bool add_token(const std::string& /* token_identifier */, const std::string& /* delegation_token */) {
+    bool _return = false;
+    return _return;
+  }
+  bool remove_token(const std::string& /* token_identifier */) {
+    bool _return = false;
+    return _return;
+  }
+  void get_token(std::string& /* _return */, const std::string& /* token_identifier */) {
+    return;
+  }
+  void get_all_token_identifiers(std::vector<std::string> & /* _return */) {
+    return;
+  }
+  int32_t add_master_key(const std::string& /* key */) {
+    int32_t _return = 0;
+    return _return;
+  }
+  void update_master_key(const int32_t /* seq_number */, const std::string& /* key */) {
+    return;
+  }
+  bool remove_master_key(const int32_t /* key_seq */) {
+    bool _return = false;
+    return _return;
+  }
+  void get_master_keys(std::vector<std::string> & /* _return */) {
+    return;
+  }
+  void get_open_txns(GetOpenTxnsResponse& /* _return */) {
+    return;
+  }
+  void get_open_txns_info(GetOpenTxnsInfoResponse& /* _return */) {
+    return;
+  }
+  void open_txns(OpenTxnsResponse& /* _return */, const OpenTxnRequest& /* rqst */) {
+    return;
+  }
+  void abort_txn(const AbortTxnRequest& /* rqst */) {
+    return;
+  }
+  void abort_txns(const AbortTxnsRequest& /* rqst */) {
+    return;
+  }
+  void commit_txn(const CommitTxnRequest& /* rqst */) {
+    return;
+  }
+  void repl_tbl_writeid_state(const ReplTblWriteIdStateRequest& /* rqst */) {
+    return;
+  }
+  void get_valid_write_ids(GetValidWriteIdsResponse& /* _return */, const GetValidWriteIdsRequest& /* rqst */) {
+    return;
+  }
+  void allocate_table_write_ids(AllocateTableWriteIdsResponse& /* _return */, const AllocateTableWriteIdsRequest& /* rqst */) {
+    return;
+  }
+  void lock(LockResponse& /* _return */, const LockRequest& /* rqst */) {
+    return;
+  }
+  void check_lock(LockResponse& /* _return */, const CheckLockRequest& /* rqst */) {
+    return;
+  }
+  void unlock(const UnlockRequest& /* rqst */) {
+    return;
+  }
+  void show_locks(ShowLocksResponse& /* _return */, const ShowLocksRequest& /* rqst */) {
+    return;
+  }
+  void heartbeat(const HeartbeatRequest& /* ids */) {
+    return;
+  }
+  void heartbeat_txn_range(HeartbeatTxnRangeResponse& /* _return */, const HeartbeatTxnRangeRequest& /* txns */) {
+    return;
+  }
+  void compact(const CompactionRequest& /* rqst */) {
+    return;
+  }
+  void compact2(CompactionResponse& /* _return */, const CompactionRequest& /* rqst */) {
+    return;
+  }
+  void show_compact(ShowCompactResponse& /* _return */, const ShowCompactRequest& /* rqst */) {
+    return;
+  }
+  void add_dynamic_partitions(const AddDynamicPartitions& /* rqst */) {
+    return;
+  }
+  void get_next_notification(NotificationEventResponse& /* _return */, const NotificationEventRequest& /* rqst */) {
+    return;
+  }
+  void get_current_notificationEventId(CurrentNotificationEventId& /* _return */) {
+    return;
+  }
+  void get_notification_events_count(NotificationEventsCountResponse& /* _return */, const NotificationEventsCountRequest& /* rqst */) {
+    return;
+  }
+  void fire_listener_event(FireEventResponse& /* _return */, const FireEventRequest& /* rqst */) {
+    return;
+  }
+  void flushCache() {
+    return;
+  }
+  void add_write_notification_log(WriteNotificationLogResponse& /* _return */, const WriteNotificationLogRequest& /* rqst */) {
+    return;
+  }
+  void cm_recycle(CmRecycleResponse& /* _return */, const CmRecycleRequest& /* request */) {
+    return;
+  }
+  void get_file_metadata_by_expr(GetFileMetadataByExprResult& /* _return */, const GetFileMetadataByExprRequest& /* req */) {
+    return;
+  }
+  void get_file_metadata(GetFileMetadataResult& /* _return */, const GetFileMetadataRequest& /* req */) {
+    return;
+  }
+  void put_file_metadata(PutFileMetadataResult& /* _return */, const PutFileMetadataRequest& /* req */) {
+    return;
+  }
+  void clear_file_metadata(ClearFileMetadataResult& /* _return */, const ClearFileMetadataRequest& /* req */) {
+    return;
+  }
+  void cache_file_metadata(CacheFileMetadataResult& /* _return */, const CacheFileMetadataRequest& /* req */) {
+    return;
+  }
+  void get_metastore_db_uuid(std::string& /* _return */) {
+    return;
+  }
+  void create_resource_plan(WMCreateResourcePlanResponse& /* _return */, const WMCreateResourcePlanRequest& /* request */) {
+    return;
+  }
+  void get_resource_plan(WMGetResourcePlanResponse& /* _return */, const WMGetResourcePlanRequest& /* request */) {
+    return;
+  }
+  void get_active_resource_plan(WMGetActiveResourcePlanResponse& /* _return */, const WMGetActiveResourcePlanRequest& /* request */) {
+    return;
+  }
+  void get_all_resource_plans(WMGetAllResourcePlanResponse& /* _return */, const WMGetAllResourcePlanRequest& /* request */) {
+    return;
+  }
+  void alter_resource_plan(WMAlterResourcePlanResponse& /* _return */, const WMAlterResourcePlanRequest& /* request */) {
+    return;
+  }
+  void validate_resource_plan(WMValidateResourcePlanResponse& /* _return */, const WMValidateResourcePlanRequest& /* request */) {
+    return;
+  }
+  void drop_resource_plan(WMDropResourcePlanResponse& /* _return */, const WMDropResourcePlanRequest& /* request */) {
+    return;
+  }
+  void create_wm_trigger(WMCreateTriggerResponse& /* _return */, const WMCreateTriggerRequest& /* request */) {
+    return;
+  }
+  void alter_wm_trigger(WMAlterTriggerResponse& /* _return */, const WMAlterTriggerRequest& /* request */) {
+    return;
+  }
+  void drop_wm_trigger(WMDropTriggerResponse& /* _return */, const WMDropTriggerRequest& /* request */) {
+    return;
+  }
+  void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& /* _return */, const WMGetTriggersForResourePlanRequest& /* request */) {
+    return;
+  }
+  void create_wm_pool(WMCreatePoolResponse& /* _return */, const WMCreatePoolRequest& /* request */) {
+    return;
+  }
+  void alter_wm_pool(WMAlterPoolResponse& /* _return */, const WMAlterPoolRequest& /* request */) {
+    return;
+  }
+  void drop_wm_pool(WMDropPoolResponse& /* _return */, const WMDropPoolRequest& /* request */) {
+    return;
+  }
+  void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& /* _return */, const WMCreateOrUpdateMappingRequest& /* request */) {
+    return;
+  }
+  void drop_wm_mapping(WMDropMappingResponse& /* _return */, const WMDropMappingRequest& /* request */) {
+    return;
+  }
+  void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& /* _return */, const WMCreateOrDropTriggerToPoolMappingRequest& /* request */) {
+    return;
+  }
+  void create_ischema(const ISchema& /* schema */) {
+    return;
+  }
+  void alter_ischema(const AlterISchemaRequest& /* rqst */) {
+    return;
+  }
+  void get_ischema(ISchema& /* _return */, const ISchemaName& /* name */) {
+    return;
+  }
+  void drop_ischema(const ISchemaName& /* name */) {
+    return;
+  }
+  void add_schema_version(const SchemaVersion& /* schemaVersion */) {
+    return;
+  }
+  void get_schema_version(SchemaVersion& /* _return */, const SchemaVersionDescriptor& /* schemaVersion */) {
+    return;
+  }
+  void get_schema_latest_version(SchemaVersion& /* _return */, const ISchemaName& /* schemaName */) {
+    return;
+  }
+  void get_schema_all_versions(std::vector<SchemaVersion> & /* _return */, const ISchemaName& /* schemaName */) {
+    return;
+  }
+  void drop_schema_version(const SchemaVersionDescriptor& /* schemaVersion */) {
+    return;
+  }
+  void get_schemas_by_cols(FindSchemasByColsResp& /* _return */, const FindSchemasByColsRqst& /* rqst */) {
+    return;
+  }
+  void map_schema_version_to_serde(const MapSchemaVersionToSerdeRequest& /* rqst */) {
+    return;
+  }
+  void set_schema_version_state(const SetSchemaVersionStateRequest& /* rqst */) {
+    return;
+  }
+  void add_serde(const SerDeInfo& /* serde */) {
+    return;
+  }
+  void get_serde(SerDeInfo& /* _return */, const GetSerdeRequest& /* rqst */) {
+    return;
+  }
+  void get_lock_materialization_rebuild(LockResponse& /* _return */, const std::string& /* dbName */, const std::string& /* tableName */, const int64_t /* txnId */) {
+    return;
+  }
+  bool heartbeat_lock_materialization_rebuild(const std::string& /* dbName */, const std::string& /* tableName */, const int64_t /* txnId */) {
+    bool _return = false;
+    return _return;
+  }
+  void add_runtime_stats(const RuntimeStat& /* stat */) {
+    return;
+  }
+  void get_runtime_stats(std::vector<RuntimeStat> & /* _return */, const GetRuntimeStatsRequest& /* rqst */) {
+    return;
+  }
+};
+
+typedef struct _ThriftHiveMetastore_getMetaConf_args__isset {
+  _ThriftHiveMetastore_getMetaConf_args__isset() : key(false) {}
+  bool key :1;
+} _ThriftHiveMetastore_getMetaConf_args__isset;
+
+class ThriftHiveMetastore_getMetaConf_args {
+ public:
+
+  ThriftHiveMetastore_getMetaConf_args(const ThriftHiveMetastore_getMetaConf_args&);
+  ThriftHiveMetastore_getMetaConf_args& operator=(const ThriftHiveMetastore_getMetaConf_args&);
+  ThriftHiveMetastore_getMetaConf_args() : key() {
+  }
+
+  virtual ~ThriftHiveMetastore_getMetaConf_args() throw();
+  std::string key;
+
+  _ThriftHiveMetastore_getMetaConf_args__isset __isset;
+
+  void __set_key(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_getMetaConf_args & rhs) const
+  {
+    if (!(key == rhs.key))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_getMetaConf_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_getMetaConf_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_getMetaConf_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_getMetaConf_pargs() throw();
+  const std::string* key;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_getMetaConf_result__isset {
+  _ThriftHiveMetastore_getMetaConf_result__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_getMetaConf_result__isset;
+
+class ThriftHiveMetastore_getMetaConf_result {
+ public:
+
+  ThriftHiveMetastore_getMetaConf_result(const ThriftHiveMetastore_getMetaConf_result&);
+  ThriftHiveMetastore_getMetaConf_result& operator=(const ThriftHiveMetastore_getMetaConf_result&);
+  ThriftHiveMetastore_getMetaConf_result() : success() {
+  }
+
+  virtual ~ThriftHiveMetastore_getMetaConf_result() throw();
+  std::string success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_getMetaConf_result__isset __isset;
+
+  void __set_success(const std::string& val);
+
+  void __set_o1(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_getMetaConf_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_getMetaConf_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_getMetaConf_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_getMetaConf_presult__isset {
+  _ThriftHiveMetastore_getMetaConf_presult__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_getMetaConf_presult__isset;
+
+class ThriftHiveMetastore_getMetaConf_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_getMetaConf_presult() throw();
+  std::string* success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_getMetaConf_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_setMetaConf_args__isset {
+  _ThriftHiveMetastore_setMetaConf_args__isset() : key(false), value(false) {}
+  bool key :1;
+  bool value :1;
+} _ThriftHiveMetastore_setMetaConf_args__isset;
+
+class ThriftHiveMetastore_setMetaConf_args {
+ public:
+
+  ThriftHiveMetastore_setMetaConf_args(const ThriftHiveMetastore_setMetaConf_args&);
+  ThriftHiveMetastore_setMetaConf_args& operator=(const ThriftHiveMetastore_setMetaConf_args&);
+  ThriftHiveMetastore_setMetaConf_args() : key(), value() {
+  }
+
+  virtual ~ThriftHiveMetastore_setMetaConf_args() throw();
+  std::string key;
+  std::string value;
+
+  _ThriftHiveMetastore_setMetaConf_args__isset __isset;
+
+  void __set_key(const std::string& val);
+
+  void __set_value(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_setMetaConf_args & rhs) const
+  {
+    if (!(key == rhs.key))
+      return false;
+    if (!(value == rhs.value))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_setMetaConf_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_setMetaConf_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_setMetaConf_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_setMetaConf_pargs() throw();
+  const std::string* key;
+  const std::string* value;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_setMetaConf_result__isset {
+  _ThriftHiveMetastore_setMetaConf_result__isset() : o1(false) {}
+  bool o1 :1;
+} _ThriftHiveMetastore_setMetaConf_result__isset;
+
+class ThriftHiveMetastore_setMetaConf_result {
+ public:
+
+  ThriftHiveMetastore_setMetaConf_result(const ThriftHiveMetastore_setMetaConf_result&);
+  ThriftHiveMetastore_setMetaConf_result& operator=(const ThriftHiveMetastore_setMetaConf_result&);
+  ThriftHiveMetastore_setMetaConf_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_setMetaConf_result() throw();
+  MetaException o1;
+
+  _ThriftHiveMetastore_setMetaConf_result__isset __isset;
+
+  void __set_o1(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_setMetaConf_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_setMetaConf_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_setMetaConf_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_setMetaConf_presult__isset {
+  _ThriftHiveMetastore_setMetaConf_presult__isset() : o1(false) {}
+  bool o1 :1;
+} _ThriftHiveMetastore_setMetaConf_presult__isset;
+
+class ThriftHiveMetastore_setMetaConf_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_setMetaConf_presult() throw();
+  MetaException o1;
+
+  _ThriftHiveMetastore_setMetaConf_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_create_catalog_args__isset {
+  _ThriftHiveMetastore_create_catalog_args__isset() : catalog(false) {}
+  bool catalog :1;
+} _ThriftHiveMetastore_create_catalog_args__isset;
+
+class ThriftHiveMetastore_create_catalog_args {
+ public:
+
+  ThriftHiveMetastore_create_catalog_args(const ThriftHiveMetastore_create_catalog_args&);
+  ThriftHiveMetastore_create_catalog_args& operator=(const ThriftHiveMetastore_create_catalog_args&);
+  ThriftHiveMetastore_create_catalog_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_create_catalog_args() throw();
+  CreateCatalogRequest catalog;
+
+  _ThriftHiveMetastore_create_catalog_args__isset __isset;
+
+  void __set_catalog(const CreateCatalogRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_create_catalog_args & rhs) const
+  {
+    if (!(catalog == rhs.catalog))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_create_catalog_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_create_catalog_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_create_catalog_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_create_catalog_pargs() throw();
+  const CreateCatalogRequest* catalog;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_catalog_result__isset {
+  _ThriftHiveMetastore_create_catalog_result__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_create_catalog_result__isset;
+
+class ThriftHiveMetastore_create_catalog_result {
+ public:
+
+  ThriftHiveMetastore_create_catalog_result(const ThriftHiveMetastore_create_catalog_result&);
+  ThriftHiveMetastore_create_catalog_result& operator=(const ThriftHiveMetastore_create_catalog_result&);
+  ThriftHiveMetastore_create_catalog_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_create_catalog_result() throw();
+  AlreadyExistsException o1;
+  InvalidObjectException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_create_catalog_result__isset __isset;
+
+  void __set_o1(const AlreadyExistsException& val);
+
+  void __set_o2(const InvalidObjectException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_create_catalog_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_create_catalog_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_create_catalog_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_catalog_presult__isset {
+  _ThriftHiveMetastore_create_catalog_presult__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_create_catalog_presult__isset;
+
+class ThriftHiveMetastore_create_catalog_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_create_catalog_presult() throw();
+  AlreadyExistsException o1;
+  InvalidObjectException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_create_catalog_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_catalog_args__isset {
+  _ThriftHiveMetastore_alter_catalog_args__isset() : rqst(false) {}
+  bool rqst :1;
+} _ThriftHiveMetastore_alter_catalog_args__isset;
+
+class ThriftHiveMetastore_alter_catalog_args {
+ public:
+
+  ThriftHiveMetastore_alter_catalog_args(const ThriftHiveMetastore_alter_catalog_args&);
+  ThriftHiveMetastore_alter_catalog_args& operator=(const ThriftHiveMetastore_alter_catalog_args&);
+  ThriftHiveMetastore_alter_catalog_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_catalog_args() throw();
+  AlterCatalogRequest rqst;
+
+  _ThriftHiveMetastore_alter_catalog_args__isset __isset;
+
+  void __set_rqst(const AlterCatalogRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_catalog_args & rhs) const
+  {
+    if (!(rqst == rhs.rqst))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_catalog_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_catalog_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_alter_catalog_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_catalog_pargs() throw();
+  const AlterCatalogRequest* rqst;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_catalog_result__isset {
+  _ThriftHiveMetastore_alter_catalog_result__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_alter_catalog_result__isset;
+
+class ThriftHiveMetastore_alter_catalog_result {
+ public:
+
+  ThriftHiveMetastore_alter_catalog_result(const ThriftHiveMetastore_alter_catalog_result&);
+  ThriftHiveMetastore_alter_catalog_result& operator=(const ThriftHiveMetastore_alter_catalog_result&);
+  ThriftHiveMetastore_alter_catalog_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_alter_catalog_result() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_alter_catalog_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const InvalidOperationException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_alter_catalog_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_alter_catalog_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_alter_catalog_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_alter_catalog_presult__isset {
+  _ThriftHiveMetastore_alter_catalog_presult__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_alter_catalog_presult__isset;
+
+class ThriftHiveMetastore_alter_catalog_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_alter_catalog_presult() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_alter_catalog_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_get_catalog_args__isset {
+  _ThriftHiveMetastore_get_catalog_args__isset() : catName(false) {}
+  bool catName :1;
+} _ThriftHiveMetastore_get_catalog_args__isset;
+
+class ThriftHiveMetastore_get_catalog_args {
+ public:
+
+  ThriftHiveMetastore_get_catalog_args(const ThriftHiveMetastore_get_catalog_args&);
+  ThriftHiveMetastore_get_catalog_args& operator=(const ThriftHiveMetastore_get_catalog_args&);
+  ThriftHiveMetastore_get_catalog_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_catalog_args() throw();
+  GetCatalogRequest catName;
+
+  _ThriftHiveMetastore_get_catalog_args__isset __isset;
+
+  void __set_catName(const GetCatalogRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_get_catalog_args & rhs) const
+  {
+    if (!(catName == rhs.catName))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_catalog_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_catalog_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_catalog_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_catalog_pargs() throw();
+  const GetCatalogRequest* catName;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_catalog_result__isset {
+  _ThriftHiveMetastore_get_catalog_result__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_catalog_result__isset;
+
+class ThriftHiveMetastore_get_catalog_result {
+ public:
+
+  ThriftHiveMetastore_get_catalog_result(const ThriftHiveMetastore_get_catalog_result&);
+  ThriftHiveMetastore_get_catalog_result& operator=(const ThriftHiveMetastore_get_catalog_result&);
+  ThriftHiveMetastore_get_catalog_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_catalog_result() throw();
+  GetCatalogResponse success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_catalog_result__isset __isset;
+
+  void __set_success(const GetCatalogResponse& val);
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_catalog_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_catalog_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_catalog_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_catalog_presult__isset {
+  _ThriftHiveMetastore_get_catalog_presult__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_catalog_presult__isset;
+
+class ThriftHiveMetastore_get_catalog_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_catalog_presult() throw();
+  GetCatalogResponse* success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_catalog_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+
+class ThriftHiveMetastore_get_catalogs_args {
+ public:
+
+  ThriftHiveMetastore_get_catalogs_args(const ThriftHiveMetastore_get_catalogs_args&);
+  ThriftHiveMetastore_get_catalogs_args& operator=(const ThriftHiveMetastore_get_catalogs_args&);
+  ThriftHiveMetastore_get_catalogs_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_catalogs_args() throw();
+
+  bool operator == (const ThriftHiveMetastore_get_catalogs_args & /* rhs */) const
+  {
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_catalogs_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_catalogs_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_catalogs_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_catalogs_pargs() throw();
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_catalogs_result__isset {
+  _ThriftHiveMetastore_get_catalogs_result__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_get_catalogs_result__isset;
+
+class ThriftHiveMetastore_get_catalogs_result {
+ public:
+
+  ThriftHiveMetastore_get_catalogs_result(const ThriftHiveMetastore_get_catalogs_result&);
+  ThriftHiveMetastore_get_catalogs_result& operator=(const ThriftHiveMetastore_get_catalogs_result&);
+  ThriftHiveMetastore_get_catalogs_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_catalogs_result() throw();
+  GetCatalogsResponse success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_get_catalogs_result__isset __isset;
+
+  void __set_success(const GetCatalogsResponse& val);
+
+  void __set_o1(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_catalogs_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_catalogs_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_catalogs_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_catalogs_presult__isset {
+  _ThriftHiveMetastore_get_catalogs_presult__isset() : success(false), o1(false) {}
+  bool success :1;
+  bool o1 :1;
+} _ThriftHiveMetastore_get_catalogs_presult__isset;
+
+class ThriftHiveMetastore_get_catalogs_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_catalogs_presult() throw();
+  GetCatalogsResponse* success;
+  MetaException o1;
+
+  _ThriftHiveMetastore_get_catalogs_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_catalog_args__isset {
+  _ThriftHiveMetastore_drop_catalog_args__isset() : catName(false) {}
+  bool catName :1;
+} _ThriftHiveMetastore_drop_catalog_args__isset;
+
+class ThriftHiveMetastore_drop_catalog_args {
+ public:
+
+  ThriftHiveMetastore_drop_catalog_args(const ThriftHiveMetastore_drop_catalog_args&);
+  ThriftHiveMetastore_drop_catalog_args& operator=(const ThriftHiveMetastore_drop_catalog_args&);
+  ThriftHiveMetastore_drop_catalog_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_drop_catalog_args() throw();
+  DropCatalogRequest catName;
+
+  _ThriftHiveMetastore_drop_catalog_args__isset __isset;
+
+  void __set_catName(const DropCatalogRequest& val);
+
+  bool operator == (const ThriftHiveMetastore_drop_catalog_args & rhs) const
+  {
+    if (!(catName == rhs.catName))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_drop_catalog_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_drop_catalog_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_drop_catalog_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_drop_catalog_pargs() throw();
+  const DropCatalogRequest* catName;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_catalog_result__isset {
+  _ThriftHiveMetastore_drop_catalog_result__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_drop_catalog_result__isset;
+
+class ThriftHiveMetastore_drop_catalog_result {
+ public:
+
+  ThriftHiveMetastore_drop_catalog_result(const ThriftHiveMetastore_drop_catalog_result&);
+  ThriftHiveMetastore_drop_catalog_result& operator=(const ThriftHiveMetastore_drop_catalog_result&);
+  ThriftHiveMetastore_drop_catalog_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_drop_catalog_result() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_drop_catalog_result__isset __isset;
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const InvalidOperationException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_drop_catalog_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_drop_catalog_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_drop_catalog_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_catalog_presult__isset {
+  _ThriftHiveMetastore_drop_catalog_presult__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_drop_catalog_presult__isset;
+
+class ThriftHiveMetastore_drop_catalog_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_drop_catalog_presult() throw();
+  NoSuchObjectException o1;
+  InvalidOperationException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_drop_catalog_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_create_database_args__isset {
+  _ThriftHiveMetastore_create_database_args__isset() : database(false) {}
+  bool database :1;
+} _ThriftHiveMetastore_create_database_args__isset;
+
+class ThriftHiveMetastore_create_database_args {
+ public:
+
+  ThriftHiveMetastore_create_database_args(const ThriftHiveMetastore_create_database_args&);
+  ThriftHiveMetastore_create_database_args& operator=(const ThriftHiveMetastore_create_database_args&);
+  ThriftHiveMetastore_create_database_args() {
+  }
+
+  virtual ~ThriftHiveMetastore_create_database_args() throw();
+  Database database;
+
+  _ThriftHiveMetastore_create_database_args__isset __isset;
+
+  void __set_database(const Database& val);
+
+  bool operator == (const ThriftHiveMetastore_create_database_args & rhs) const
+  {
+    if (!(database == rhs.database))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_create_database_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_create_database_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_create_database_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_create_database_pargs() throw();
+  const Database* database;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_database_result__isset {
+  _ThriftHiveMetastore_create_database_result__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_create_database_result__isset;
+
+class ThriftHiveMetastore_create_database_result {
+ public:
+
+  ThriftHiveMetastore_create_database_result(const ThriftHiveMetastore_create_database_result&);
+  ThriftHiveMetastore_create_database_result& operator=(const ThriftHiveMetastore_create_database_result&);
+  ThriftHiveMetastore_create_database_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_create_database_result() throw();
+  AlreadyExistsException o1;
+  InvalidObjectException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_create_database_result__isset __isset;
+
+  void __set_o1(const AlreadyExistsException& val);
+
+  void __set_o2(const InvalidObjectException& val);
+
+  void __set_o3(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_create_database_result & rhs) const
+  {
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    if (!(o3 == rhs.o3))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_create_database_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_create_database_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_create_database_presult__isset {
+  _ThriftHiveMetastore_create_database_presult__isset() : o1(false), o2(false), o3(false) {}
+  bool o1 :1;
+  bool o2 :1;
+  bool o3 :1;
+} _ThriftHiveMetastore_create_database_presult__isset;
+
+class ThriftHiveMetastore_create_database_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_create_database_presult() throw();
+  AlreadyExistsException o1;
+  InvalidObjectException o2;
+  MetaException o3;
+
+  _ThriftHiveMetastore_create_database_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_get_database_args__isset {
+  _ThriftHiveMetastore_get_database_args__isset() : name(false) {}
+  bool name :1;
+} _ThriftHiveMetastore_get_database_args__isset;
+
+class ThriftHiveMetastore_get_database_args {
+ public:
+
+  ThriftHiveMetastore_get_database_args(const ThriftHiveMetastore_get_database_args&);
+  ThriftHiveMetastore_get_database_args& operator=(const ThriftHiveMetastore_get_database_args&);
+  ThriftHiveMetastore_get_database_args() : name() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_database_args() throw();
+  std::string name;
+
+  _ThriftHiveMetastore_get_database_args__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  bool operator == (const ThriftHiveMetastore_get_database_args & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_database_args &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_database_args & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+
+class ThriftHiveMetastore_get_database_pargs {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_database_pargs() throw();
+  const std::string* name;
+
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_database_result__isset {
+  _ThriftHiveMetastore_get_database_result__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_database_result__isset;
+
+class ThriftHiveMetastore_get_database_result {
+ public:
+
+  ThriftHiveMetastore_get_database_result(const ThriftHiveMetastore_get_database_result&);
+  ThriftHiveMetastore_get_database_result& operator=(const ThriftHiveMetastore_get_database_result&);
+  ThriftHiveMetastore_get_database_result() {
+  }
+
+  virtual ~ThriftHiveMetastore_get_database_result() throw();
+  Database success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_database_result__isset __isset;
+
+  void __set_success(const Database& val);
+
+  void __set_o1(const NoSuchObjectException& val);
+
+  void __set_o2(const MetaException& val);
+
+  bool operator == (const ThriftHiveMetastore_get_database_result & rhs) const
+  {
+    if (!(success == rhs.success))
+      return false;
+    if (!(o1 == rhs.o1))
+      return false;
+    if (!(o2 == rhs.o2))
+      return false;
+    return true;
+  }
+  bool operator != (const ThriftHiveMetastore_get_database_result &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ThriftHiveMetastore_get_database_result & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+};
+
+typedef struct _ThriftHiveMetastore_get_database_presult__isset {
+  _ThriftHiveMetastore_get_database_presult__isset() : success(false), o1(false), o2(false) {}
+  bool success :1;
+  bool o1 :1;
+  bool o2 :1;
+} _ThriftHiveMetastore_get_database_presult__isset;
+
+class ThriftHiveMetastore_get_database_presult {
+ public:
+
+
+  virtual ~ThriftHiveMetastore_get_database_presult() throw();
+  Database* success;
+  NoSuchObjectException o1;
+  MetaException o2;
+
+  _ThriftHiveMetastore_get_database_presult__isset __isset;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+
+};
+
+typedef struct _ThriftHiveMetastore_drop_database_args__isset {
+  _ThriftHiveMetastore_drop_database_args__isset() : name(false), deleteData(false), cascade(false) {}
+  bool name :1;
+  bool deleteData :1;
+  bool cascade :1;
+} _ThriftHiveMetastore_drop_database_args__isset;
+
+class ThriftHiveMetastore_drop_database_args {
+ public:
+
+  ThriftHiveMetastore_drop_database_args(const ThriftHiveMetastore_drop_database_args&);
+  ThriftHiveMetastore_drop_database_args& operator=(const ThriftHiveMetastore_drop_database_args&);
+  ThriftHiveMetastore_drop_database_args() : name(), deleteData(0), cascade(0) {
+  }
+
+  virtua

<TRUNCATED>
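
For context, the generated C++ above (cut off by the archive) is Thrift 0.9.3 compiler output for the ThriftHiveMetastore service: a no-op handler class whose methods return default values, which Thrift emits as a convenience base for servers implementing only a subset of the interface, followed by four helper structs per RPC. For an RPC foo, foo_args is read by the server, foo_pargs is written by the client (holding const pointers to avoid copies), foo_result is written by the server and carries success plus the declared exceptions such as MetaException, and foo_presult is read back by the client, with success held by pointer. As a minimal, hedged sketch of how the getMetaConf/setMetaConf RPCs shown above are typically reached from Java, assuming the standard HiveMetaStoreClient API in this module (the configuration key is illustrative):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
  import org.apache.hadoop.hive.metastore.conf.MetastoreConf;

  public class MetaConfRoundTrip {
    public static void main(String[] args) throws Exception {
      // Connects to the metastore addressed by metastore.thrift.uris.
      Configuration conf = MetastoreConf.newMetastoreConf();
      HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
      try {
        // On the wire: ThriftHiveMetastore_getMetaConf_pargs goes out and
        // ThriftHiveMetastore_getMetaConf_presult comes back; a server-side
        // MetaException (the o1 field above) surfaces here as a thrown exception.
        String v = client.getMetaConf("metastore.try.direct.sql"); // illustrative key
        System.out.println("metastore.try.direct.sql = " + v);
        client.setMetaConf("metastore.try.direct.sql", v); // round-trips the same value
      } finally {
        client.close();
      }
    }
  }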

[66/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
index 0000000,4a97f89..267c9e8
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MPartition.java
@@@ -1,0 -1,155 +1,162 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.model;
+ 
+ import java.util.List;
+ import java.util.Map;
+ 
+ public class MPartition {
+ 
+   private String partitionName; // partition name ==> (key=value/)*(key=value)
+   private MTable table; 
+   private List<String> values;
+   private int createTime;
+   private int lastAccessTime;
+   private MStorageDescriptor sd;
+   private Map<String, String> parameters;
 -  
++  private long writeId;
+   
+   public MPartition() {}
+   
+   /**
+    * @param partitionName
+    * @param table
+    * @param values
+    * @param createTime
+    * @param lastAccessTime
+    * @param sd
+    * @param parameters
+    */
+   public MPartition(String partitionName, MTable table, List<String> values, int createTime,
+       int lastAccessTime, MStorageDescriptor sd, Map<String, String> parameters) {
+     this.partitionName = partitionName;
+     this.table = table;
+     this.values = values;
+     this.createTime = createTime;
+     this.lastAccessTime = lastAccessTime;
+     this.sd = sd;
+     this.parameters = parameters;
+   }
+ 
+   /**
+    * @return the lastAccessTime
+    */
+   public int getLastAccessTime() {
+     return lastAccessTime;
+   }
+ 
+   /**
+    * @param lastAccessTime the lastAccessTime to set
+    */
+   public void setLastAccessTime(int lastAccessTime) {
+     this.lastAccessTime = lastAccessTime;
+   }
+ 
+   /**
+    * @return the values
+    */
+   public List<String> getValues() {
+     return values;
+   }
+ 
+   /**
+    * @param values the values to set
+    */
+   public void setValues(List<String> values) {
+     this.values = values;
+   }
+ 
+   /**
+    * @return the table
+    */
+   public MTable getTable() {
+     return table;
+   }
+ 
+   /**
+    * @param table the table to set
+    */
+   public void setTable(MTable table) {
+     this.table = table;
+   }
+ 
+   /**
+    * @return the sd
+    */
+   public MStorageDescriptor getSd() {
+     return sd;
+   }
+ 
+   /**
+    * @param sd the sd to set
+    */
+   public void setSd(MStorageDescriptor sd) {
+     this.sd = sd;
+   }
+ 
+   /**
+    * @return the parameters
+    */
+   public Map<String, String> getParameters() {
+     return parameters;
+   }
+ 
+   /**
+    * @param parameters the parameters to set
+    */
+   public void setParameters(Map<String, String> parameters) {
+     this.parameters = parameters;
+   }
+ 
+   /**
+    * @return the partitionName
+    */
+   public String getPartitionName() {
+     return partitionName;
+   }
+ 
+   /**
+    * @param partitionName the partitionName to set
+    */
+   public void setPartitionName(String partitionName) {
+     this.partitionName = partitionName;
+   }
+ 
+   /**
+    * @return the createTime
+    */
+   public int getCreateTime() {
+     return createTime;
+   }
+ 
+   /**
+    * @param createTime the createTime to set
+    */
+   public void setCreateTime(int createTime) {
+     this.createTime = createTime;
+   }
+ 
++  public long getWriteId() {
++    return writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++  }
+ }
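
A brief, hedged usage sketch of the new writeId accessors added above (the demo class and values are hypothetical and not part of the patch; on the master-txnstats branch the field presumably records the write ID of the transaction that last updated the partition, mirroring the writeId added to MTable below):

  package org.apache.hadoop.hive.metastore.model;

  // Hypothetical demo class, illustrative only.
  public class MPartitionWriteIdDemo {
    public static void main(String[] args) {
      MPartition part = new MPartition();
      part.setPartitionName("ds=2018-07-12"); // (key=value/)*(key=value) form
      part.setWriteId(42L);                   // illustrative transaction write ID
      System.out.println(part.getPartitionName() + " writeId=" + part.getWriteId());
    }
  }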

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
index 0000000,38ad479..deeb971
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/model/MTable.java
@@@ -1,0 -1,273 +1,283 @@@
++
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore.model;
+ 
+ import java.util.List;
+ import java.util.Map;
+ 
+ public class MTable {
+   
+   private String tableName;
+   private MDatabase database;
+   private MStorageDescriptor sd;
+   private String owner;
+   private String ownerType;
+   private int createTime;
+   private int lastAccessTime;
+   private int retention;
+   private List<MFieldSchema> partitionKeys;
+   private Map<String, String> parameters;
+   private String viewOriginalText;
+   private String viewExpandedText;
+   private boolean rewriteEnabled;
+   private String tableType;
++  private long writeId;
+ 
+   public MTable() {}
+ 
+   /**
+    * @param tableName
+    * @param database
+    * @param sd
+    * @param owner
+    * @param ownerType
+    * @param createTime
+    * @param lastAccessTime
+    * @param retention
+    * @param partitionKeys
+    * @param parameters
+    * @param viewOriginalText
+    * @param viewExpandedText
+    * @param rewriteEnabled
+    * @param tableType
+    */
+   public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner, String ownerType,
+       int createTime, int lastAccessTime, int retention, List<MFieldSchema> partitionKeys,
+       Map<String, String> parameters, String viewOriginalText, String viewExpandedText,
+       boolean rewriteEnabled, String tableType) {
+     this.tableName = tableName;
+     this.database = database;
+     this.sd = sd;
+     this.owner = owner;
+     this.ownerType = ownerType;
+     this.createTime = createTime;
+     this.setLastAccessTime(lastAccessTime);
+     this.retention = retention;
+     this.partitionKeys = partitionKeys;
+     this.parameters = parameters;
+     this.viewOriginalText = viewOriginalText;
+     this.viewExpandedText = viewExpandedText;
+     this.rewriteEnabled = rewriteEnabled;
+     this.tableType = tableType;
+   }
+ 
+   /**
+    * @return the tableName
+    */
+   public String getTableName() {
+     return tableName;
+   }
+ 
+   /**
+    * @param tableName the tableName to set
+    */
+   public void setTableName(String tableName) {
+     this.tableName = tableName;
+   }
+ 
+   /**
+    * @return the sd
+    */
+   public MStorageDescriptor getSd() {
+     return sd;
+   }
+ 
+   /**
+    * @param sd the sd to set
+    */
+   public void setSd(MStorageDescriptor sd) {
+     this.sd = sd;
+   }
+ 
+   /**
+    * @return the partitionKeys
+    */
+   public List<MFieldSchema> getPartitionKeys() {
+     return partitionKeys;
+   }
+ 
+   /**
+    * @param partKeys the partKeys to set
+    */
+   public void setPartitionKeys(List<MFieldSchema> partKeys) {
+     this.partitionKeys = partKeys;
+   }
+ 
+   /**
+    * @return the parameters
+    */
+   public Map<String, String> getParameters() {
+     return parameters;
+   }
+ 
+   /**
+    * @param parameters the parameters to set
+    */
+   public void setParameters(Map<String, String> parameters) {
+     this.parameters = parameters;
+   }
+ 
+   /**
+    * @return the original view text, or null if this table is not a view
+    */
+   public String getViewOriginalText() {
+     return viewOriginalText;
+   }
+ 
+   /**
+    * @param viewOriginalText the original view text to set
+    */
+   public void setViewOriginalText(String viewOriginalText) {
+     this.viewOriginalText = viewOriginalText;
+   }
+ 
+   /**
+    * @return the expanded view text, or null if this table is not a view
+    */
+   public String getViewExpandedText() {
+     return viewExpandedText;
+   }
+ 
+   /**
+    * @param viewExpandedText the expanded view text to set
+    */
+   public void setViewExpandedText(String viewExpandedText) {
+     this.viewExpandedText = viewExpandedText;
+   }
+ 
+   /**
+    * @return whether the view can be used for rewriting queries
+    */
+   public boolean isRewriteEnabled() {
+     return rewriteEnabled;
+   }
+ 
+   /**
+    * @param rewriteEnabled whether the view can be used for rewriting queries
+    */
+   public void setRewriteEnabled(boolean rewriteEnabled) {
+     this.rewriteEnabled = rewriteEnabled;
+   }
+ 
+   /**
+    * @return the owner
+    */
+   public String getOwner() {
+     return owner;
+   }
+ 
+   /**
+    * @param owner the owner to set
+    */
+   public void setOwner(String owner) {
+     this.owner = owner;
+   }
+ 
+   /**
+    * @return the owner type
+    */
+   public String getOwnerType() {
+     return ownerType;
+   }
+ 
+   /**
+    * @param ownerType the owner type to set
+    */
+   public void setOwnerType(String ownerType) {
+     this.ownerType = ownerType;
+   }
+ 
+   /**
+    * @return the createTime
+    */
+   public int getCreateTime() {
+     return createTime;
+   }
+ 
+   /**
+    * @param createTime the createTime to set
+    */
+   public void setCreateTime(int createTime) {
+     this.createTime = createTime;
+   }
+ 
+   /**
+    * @return the database
+    */
+   public MDatabase getDatabase() {
+     return database;
+   }
+ 
+   /**
+    * @param database the database to set
+    */
+   public void setDatabase(MDatabase database) {
+     this.database = database;
+   }
+ 
+   /**
+    * @return the retention
+    */
+   public int getRetention() {
+     return retention;
+   }
+ 
+   /**
+    * @param retention the retention to set
+    */
+   public void setRetention(int retention) {
+     this.retention = retention;
+   }
+ 
+   /**
+    * @param lastAccessTime the lastAccessTime to set
+    */
+   public void setLastAccessTime(int lastAccessTime) {
+     this.lastAccessTime = lastAccessTime;
+   }
+ 
+   /**
+    * @return the lastAccessTime
+    */
+   public int getLastAccessTime() {
+     return lastAccessTime;
+   }
+ 
+   /**
+    * @param tableType the tableType to set
+    */
+   public void setTableType(String tableType) {
+     this.tableType = tableType;
+   }
+ 
+   /**
+    * @return the tableType
+    */
+   public String getTableType() {
+     return tableType;
+   }
++
++  public long getWriteId() {
++    return writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++  }
+ }

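The MTable hunk above likewise adds a writeId field but leaves the constructor
signature unchanged, so the field stays 0 until set explicitly. A minimal
sketch (hypothetical usage, not part of this patch):

    MTable tbl = new MTable();
    tbl.setTableName("t1");
    tbl.setTableType("MANAGED_TABLE");
    tbl.setWriteId(17L);   // not a constructor argument; defaults to 0
    assert tbl.getWriteId() == 17L;
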
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
index 0000000,4e3068d..1f559e9
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/CompactionTxnHandler.java
@@@ -1,0 -1,1107 +1,1158 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
++import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.api.CompactionType;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.util.StringUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.sql.Connection;
+ import java.sql.PreparedStatement;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Set;
+ 
+ /**
+  * Extends the transaction handler with methods needed only by the compactor threads.  These
+  * methods are not available through the thrift interface.
+  */
+ class CompactionTxnHandler extends TxnHandler {
+   static final private String CLASS_NAME = CompactionTxnHandler.class.getName();
+   static final private Logger LOG = LoggerFactory.getLogger(CLASS_NAME);
+ 
+   public CompactionTxnHandler() {
+   }
+ 
+   /**
+    * This will look through the completed_txn_components table and look for partitions or tables
+    * that may be ready for compaction.  Also, look through txns and txn_components tables for
+    * aborted transactions that we should add to the list.
+    * @param maxAborted Maximum number of aborted transactions to allow for a table or partition
+    *                   before marking it as a potential compaction.
+    * @return list of CompactionInfo structs.  These will not have id, type,
+    * or runAs set since these are only potential compactions not actual ones.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException {
+     Connection dbConn = null;
+     Set<CompactionInfo> response = new HashSet<>();
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         // Check for completed transactions
+         String s = "select distinct ctc_database, ctc_table, " +
+           "ctc_partition from COMPLETED_TXN_COMPONENTS";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           CompactionInfo info = new CompactionInfo();
+           info.dbname = rs.getString(1);
+           info.tableName = rs.getString(2);
+           info.partName = rs.getString(3);
+           response.add(info);
+         }
+         rs.close();
+ 
+         // Check for aborted txns
+         s = "select tc_database, tc_table, tc_partition " +
+           "from TXNS, TXN_COMPONENTS " +
+           "where txn_id = tc_txnid and txn_state = '" + TXN_ABORTED + "' " +
+           "group by tc_database, tc_table, tc_partition " +
+           "having count(*) > " + maxAborted;
+ 
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           CompactionInfo info = new CompactionInfo();
+           info.dbname = rs.getString(1);
+           info.tableName = rs.getString(2);
+           info.partName = rs.getString(3);
+           info.tooManyAborts = true;
+           response.add(info);
+         }
+ 
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+       } catch (SQLException e) {
+         LOG.error("Unable to connect to transaction database " + e.getMessage());
+         checkRetryable(dbConn, e, "findPotentialCompactions(maxAborted:" + maxAborted + ")");
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+       return response;
+     }
+     catch (RetryException e) {
+       return findPotentialCompactions(maxAborted);
+     }
+   }
+ 
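+   // A hedged sketch of the expected call pattern (illustrative only, not part
+   // of this patch): the compactor Initiator would consume the result roughly as
+   //
+   //   Set<CompactionInfo> candidates = txnHandler.findPotentialCompactions(10);
+   //   for (CompactionInfo ci : candidates) {
+   //     // ci.id, ci.type and ci.runAs are unset here; they are only assigned
+   //     // once a compaction is actually enqueued.
+   //   }
+ 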
+   /**
+    * Sets the user to run as.  This is for the case
+    * where the request was generated by the user and so the worker must set this value later.
+    * @param cq_id id of this entry in the queue
+    * @param user user to run the jobs as
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void setRunAs(long cq_id, String user) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_run_as = '" + user + "' where cq_id = " + cq_id;
+         LOG.debug("Going to execute update <" + s + ">");
+         int updCnt = stmt.executeUpdate(s);
+         if (updCnt != 1) {
+           LOG.error("Unable to set cq_run_as=" + user + " for compaction record with cq_id=" + cq_id + ".  updCnt=" + updCnt);
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to update compaction queue, " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "setRunAs(cq_id:" + cq_id + ",user:" + user +")");
+       } finally {
+         closeDbConn(dbConn);
+         closeStmt(stmt);
+       }
+     } catch (RetryException e) {
+       setRunAs(cq_id, user);
+     }
+   }
+ 
+   /**
+    * This will grab the next compaction request off of
+    * the queue, and assign it to the worker.
+    * @param workerId id of the worker calling this, will be recorded in the db
+    * @return an info element for this compaction request, or null if there is no work to do now.
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public CompactionInfo findNextToCompact(String workerId) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       //need a separate stmt for executeUpdate() otherwise it will close the ResultSet(HIVE-12725)
+       Statement updStmt = null;
+       ResultSet rs = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select cq_id, cq_database, cq_table, cq_partition, " +
+           "cq_type, cq_tblproperties from COMPACTION_QUEUE where cq_state = '" + INITIATED_STATE + "'";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           LOG.debug("No compactions found ready to compact");
+           dbConn.rollback();
+           return null;
+         }
+         updStmt = dbConn.createStatement();
+         do {
+           CompactionInfo info = new CompactionInfo();
+           info.id = rs.getLong(1);
+           info.dbname = rs.getString(2);
+           info.tableName = rs.getString(3);
+           info.partName = rs.getString(4);
+           info.type = dbCompactionType2ThriftType(rs.getString(5).charAt(0));
+           info.properties = rs.getString(6);
+           // Now, update this record as being worked on by this worker.
+           long now = getDbTime(dbConn);
+           s = "update COMPACTION_QUEUE set cq_worker_id = '" + workerId + "', " +
+             "cq_start = " + now + ", cq_state = '" + WORKING_STATE + "' where cq_id = " + info.id +
+             " AND cq_state='" + INITIATED_STATE + "'";
+           LOG.debug("Going to execute update <" + s + ">");
+           int updCount = updStmt.executeUpdate(s);
+           if(updCount == 1) {
+             dbConn.commit();
+             return info;
+           }
+           if(updCount == 0) {
+             LOG.debug("Another Worker picked up " + info);
+             continue;
+           }
+           LOG.error("Unable to set to cq_state=" + WORKING_STATE + " for compaction record: " +
+             info + ". updCnt=" + updCount + ".");
+           dbConn.rollback();
+           return null;
+         } while( rs.next());
+         dbConn.rollback();
+         return null;
+       } catch (SQLException e) {
+         LOG.error("Unable to select next element for compaction, " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "findNextToCompact(workerId:" + workerId + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(updStmt);
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return findNextToCompact(workerId);
+     }
+   }
+ 
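+   // The conditional UPDATE above ("... where cq_id = ... AND cq_state = 'i'")
+   // is an optimistic compare-and-set; a minimal sketch of the same pattern
+   // (illustrative only, not part of this patch):
+   //
+   //   int n = stmt.executeUpdate("update T set state = 'w' where id = 1 and state = 'i'");
+   //   if (n == 1) { /* this worker owns the row */ }
+   //   else { /* another worker won the race; move on to the next candidate */ }
+ 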
+   /**
+    * This will mark an entry in the queue as compacted
+    * and put it in the ready to clean state.
+    * @param info info on the compaction entry to mark as compacted.
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void markCompacted(CompactionInfo info) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_state = '" + READY_FOR_CLEANING + "', " +
+           "cq_worker_id = null where cq_id = " + info.id;
+         LOG.debug("Going to execute update <" + s + ">");
+         int updCnt = stmt.executeUpdate(s);
+         if (updCnt != 1) {
+           LOG.error("Unable to set cq_state=" + READY_FOR_CLEANING + " for compaction record: " + info + ". updCnt=" + updCnt);
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+         }
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to update compaction queue " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "markCompacted(" + info + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(stmt);
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       markCompacted(info);
+     }
+   }
+ 
+   /**
+    * Find entries in the queue that are ready to
+    * be cleaned.
+    * @return information on the entry in the queue.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public List<CompactionInfo> findReadyToClean() throws MetaException {
+     Connection dbConn = null;
+     List<CompactionInfo> rc = new ArrayList<>();
+ 
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select cq_id, cq_database, cq_table, cq_partition, "
+                 + "cq_type, cq_run_as, cq_highest_write_id from COMPACTION_QUEUE where cq_state = '"
+                 + READY_FOR_CLEANING + "'";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         while (rs.next()) {
+           CompactionInfo info = new CompactionInfo();
+           info.id = rs.getLong(1);
+           info.dbname = rs.getString(2);
+           info.tableName = rs.getString(3);
+           info.partName = rs.getString(4);
+           switch (rs.getString(5).charAt(0)) {
+             case MAJOR_TYPE: info.type = CompactionType.MAJOR; break;
+             case MINOR_TYPE: info.type = CompactionType.MINOR; break;
+             default: throw new MetaException("Unexpected compaction type " + rs.getString(5));
+           }
+           info.runAs = rs.getString(6);
+           info.highestWriteId = rs.getLong(7);
+           rc.add(info);
+         }
+         LOG.debug("Going to rollback");
+         dbConn.rollback();
+         return rc;
+       } catch (SQLException e) {
+         LOG.error("Unable to select next element for cleaning, " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "findReadyToClean");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return findReadyToClean();
+     }
+   }
+ 
+   /**
+    * This will remove an entry from the queue after
+    * it has been compacted.
+    * 
+    * @param info info on the compaction entry to remove
+    */
+   @Override
+   @RetrySemantics.CannotRetry
+   public void markCleaned(CompactionInfo info) throws MetaException {
+     try {
+       Connection dbConn = null;
+       PreparedStatement pStmt = null;
+       ResultSet rs = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?");
+         pStmt.setLong(1, info.id);
+         rs = pStmt.executeQuery();
+         if(rs.next()) {
+           info = CompactionInfo.loadFullFromCompactionQueue(rs);
+         }
+         else {
+           throw new IllegalStateException("No record with CQ_ID=" + info.id + " found in COMPACTION_QUEUE");
+         }
+         close(rs);
+         String s = "delete from COMPACTION_QUEUE where cq_id = ?";
+         pStmt = dbConn.prepareStatement(s);
+         pStmt.setLong(1, info.id);
+         LOG.debug("Going to execute update <" + s + ">");
+         int updCount = pStmt.executeUpdate();
+         if (updCount != 1) {
+           LOG.error("Unable to delete compaction record: " + info +  ".  Update count=" + updCount);
+           LOG.debug("Going to rollback");
+           dbConn.rollback();
+         }
+         pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
+         info.state = SUCCEEDED_STATE;
+         CompactionInfo.insertIntoCompletedCompactions(pStmt, info, getDbTime(dbConn));
+         updCount = pStmt.executeUpdate();
+ 
+         // Remove entries from completed_txn_components as well, so we don't start looking there
+         // again, but only up to the highest write ID included in this compaction job.
+         //highestWriteId will be NULL in upgrade scenarios
+         s = "delete from COMPLETED_TXN_COMPONENTS where ctc_database = ? and " +
+             "ctc_table = ?";
+         if (info.partName != null) {
+           s += " and ctc_partition = ?";
+         }
+         if(info.highestWriteId != 0) {
+           s += " and ctc_writeid <= ?";
+         }
+         pStmt = dbConn.prepareStatement(s);
+         int paramCount = 1;
+         pStmt.setString(paramCount++, info.dbname);
+         pStmt.setString(paramCount++, info.tableName);
+         if (info.partName != null) {
+           pStmt.setString(paramCount++, info.partName);
+         }
+         if(info.highestWriteId != 0) {
+           pStmt.setLong(paramCount++, info.highestWriteId);
+         }
+         LOG.debug("Going to execute update <" + s + ">");
+         if (pStmt.executeUpdate() < 1) {
+           LOG.error("Expected to remove at least one row from completed_txn_components when " +
+             "marking compaction entry as clean!");
+         }
+ 
+         s = "select distinct txn_id from TXNS, TXN_COMPONENTS where txn_id = tc_txnid and txn_state = '" +
+           TXN_ABORTED + "' and tc_database = ? and tc_table = ?";
+         if (info.highestWriteId != 0) s += " and tc_writeid <= ?";
+         if (info.partName != null) s += " and tc_partition = ?";
+ 
+         pStmt = dbConn.prepareStatement(s);
+         paramCount = 1;
+         pStmt.setString(paramCount++, info.dbname);
+         pStmt.setString(paramCount++, info.tableName);
+         if(info.highestWriteId != 0) {
+           pStmt.setLong(paramCount++, info.highestWriteId);
+         }
+         if (info.partName != null) {
+           pStmt.setString(paramCount++, info.partName);
+         }
+ 
+         LOG.debug("Going to execute update <" + s + ">");
+         rs = pStmt.executeQuery();
+         List<Long> txnids = new ArrayList<>();
+         List<String> questions = new ArrayList<>();
+         while (rs.next()) {
+           long id = rs.getLong(1);
+           txnids.add(id);
+           questions.add("?");
+         }
+         // Remove entries from txn_components, as there may be aborted txn components
+         if (txnids.size() > 0) {
+           List<String> queries = new ArrayList<>();
+ 
+           // Prepare prefix and suffix
+           StringBuilder prefix = new StringBuilder();
+           StringBuilder suffix = new StringBuilder();
+ 
+           prefix.append("delete from TXN_COMPONENTS where ");
+ 
+           //because 1 txn may include different partitions/tables even in auto commit mode
+           suffix.append(" and tc_database = ?");
+           suffix.append(" and tc_table = ?");
+           if (info.partName != null) {
+             suffix.append(" and tc_partition = ?");
+           }
+ 
+           // Populate the complete query with provided prefix and suffix
+           List<Integer> counts = TxnUtils
+               .buildQueryWithINClauseStrings(conf, queries, prefix, suffix, questions, "tc_txnid",
+                   true, false);
+           int totalCount = 0;
+           for (int i = 0; i < queries.size(); i++) {
+             String query = queries.get(i);
+             int insertCount = counts.get(i);
+ 
+             LOG.debug("Going to execute update <" + query + ">");
+             pStmt = dbConn.prepareStatement(query);
+             for (int j = 0; j < insertCount; j++) {
+               pStmt.setLong(j + 1, txnids.get(totalCount + j));
+             }
+             totalCount += insertCount;
+             paramCount = insertCount + 1;
+             pStmt.setString(paramCount++, info.dbname);
+             pStmt.setString(paramCount++, info.tableName);
+             if (info.partName != null) {
+               pStmt.setString(paramCount++, info.partName);
+             }
+             int rc = pStmt.executeUpdate();
+             LOG.debug("Removed " + rc + " records from txn_components");
+ 
+             // Don't bother cleaning from the txns table.  A separate call will do that.  We don't
+             // know here which txns still have components from other tables or partitions in the
+             // table, so we don't know which ones we can and cannot clean.
+           }
+         }
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to delete from compaction queue " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "markCleaned(" + info + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, pStmt, dbConn);
+       }
+     } catch (RetryException e) {
+       markCleaned(info);
+     }
+   }
+ 
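+   // TxnUtils.buildQueryWithINClauseStrings used above splits a potentially huge
+   // IN list across several statements so no single query exceeds the database's
+   // parameter or length limits. A hedged sketch of its output shape (hypothetical
+   // batch sizes, not part of this patch):
+   //
+   //   delete from TXN_COMPONENTS where (tc_txnid in (?,?,?)) and tc_database = ? and tc_table = ?
+   //   delete from TXN_COMPONENTS where (tc_txnid in (?,?))   and tc_database = ? and tc_table = ?
+   //
+   // The returned List<Integer> holds the number of '?' placeholders per
+   // generated statement so the caller can bind the txn ids in order, as done above.
+ 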
+   /**
+    * Clean up entries from the TXN_TO_WRITE_ID table that are below min_uncommitted_txnid as found by
+    * min(NEXT_TXN_ID.ntxn_next, min(MIN_HISTORY_LEVEL.mhl_min_open_txnid), min(Aborted TXNS.txn_id)).
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void cleanTxnToWriteIdTable() throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+ 
+       try {
+         // We query for minimum values in all the queries and they can only increase by any concurrent
+         // operations. So, READ COMMITTED is sufficient.
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+ 
+         // First need to find the min_uncommitted_txnid which is currently seen by any open transactions.
+         // If there are no txns which are currently open or aborted in the system, then current value of
+         // NEXT_TXN_ID.ntxn_next could be min_uncommitted_txnid.
+         String s = "select ntxn_next from NEXT_TXN_ID";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (!rs.next()) {
+           throw new MetaException("Transaction tables not properly " +
+                   "initialized, no record found in next_txn_id");
+         }
+         long minUncommittedTxnId = rs.getLong(1);
+ 
+         // If there are any open txns, then the minimum of min_open_txnid from MIN_HISTORY_LEVEL table
+         // could be the min_uncommitted_txnid if lesser than NEXT_TXN_ID.ntxn_next.
+         s = "select min(mhl_min_open_txnid) from MIN_HISTORY_LEVEL";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (rs.next()) {
+           long minOpenTxnId = rs.getLong(1);
+           if (minOpenTxnId > 0) {
+             minUncommittedTxnId = Math.min(minOpenTxnId, minUncommittedTxnId);
+           }
+         }
+ 
+         // If there are aborted txns, then the minimum aborted txnid could be the min_uncommitted_txnid
+         // if lesser than both NEXT_TXN_ID.ntxn_next and min(MIN_HISTORY_LEVEL .mhl_min_open_txnid).
+         s = "select min(txn_id) from TXNS where txn_state = " + quoteChar(TXN_ABORTED);
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         if (rs.next()) {
+           long minAbortedTxnId = rs.getLong(1);
+           if (minAbortedTxnId > 0) {
+             minUncommittedTxnId = Math.min(minAbortedTxnId, minUncommittedTxnId);
+           }
+         }
+ 
+         // As all txns below min_uncommitted_txnid are either committed or empty_aborted, we are allowed
+         // to cleanup the entries less than min_uncommitted_txnid from the TXN_TO_WRITE_ID table.
+         s = "delete from TXN_TO_WRITE_ID where t2w_txnid < " + minUncommittedTxnId;
+         LOG.debug("Going to execute delete <" + s + ">");
+         int rc = stmt.executeUpdate(s);
+         LOG.info("Removed " + rc + " rows from TXN_TO_WRITE_ID with Txn Low-Water-Mark: " + minUncommittedTxnId);
+ 
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to delete from txns table " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "cleanTxnToWriteIdTable");
+         throw new MetaException("Unable to connect to transaction database " +
+                 StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       cleanTxnToWriteIdTable();
+     }
+   }
+ 
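+   // The low-water-mark computed above is just the minimum of the three candidate
+   // txn ids, skipping unset (<= 0) values. A minimal equivalent sketch
+   // (illustrative only, not part of this patch):
+   //
+   //   long lwm = ntxnNext;                                           // NEXT_TXN_ID.ntxn_next
+   //   if (minOpenTxnId > 0)    lwm = Math.min(lwm, minOpenTxnId);    // MIN_HISTORY_LEVEL
+   //   if (minAbortedTxnId > 0) lwm = Math.min(lwm, minAbortedTxnId); // aborted TXNS
+   //   // delete from TXN_TO_WRITE_ID where t2w_txnid < lwm
+ 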
+   /**
+    * Clean up aborted transactions from txns that have no components in txn_components. Such
+    * txns can exist if no work was done in the txn (e.g. Streaming opened a TransactionBatch and
+    * abandoned it w/o doing any work) or due to {@link #markCleaned(CompactionInfo)} being called.
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void cleanEmptyAbortedTxns() throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       ResultSet rs = null;
+       try {
+         //Aborted is a terminal state, so nothing about the txn can change
+         //after that, so READ COMMITTED is sufficient.
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "select txn_id from TXNS where " +
 -          "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
 -          "txn_state = '" + TXN_ABORTED + "'";
++            "txn_id not in (select tc_txnid from TXN_COMPONENTS) and " +
++            "txn_state = '" + TXN_ABORTED + "'";
+         LOG.debug("Going to execute query <" + s + ">");
+         rs = stmt.executeQuery(s);
+         List<Long> txnids = new ArrayList<>();
+         while (rs.next()) txnids.add(rs.getLong(1));
+         close(rs);
+         if(txnids.size() <= 0) {
+           return;
+         }
+         Collections.sort(txnids);//easier to read logs
++
+         List<String> queries = new ArrayList<>();
+         StringBuilder prefix = new StringBuilder();
+         StringBuilder suffix = new StringBuilder();
+ 
++        // Turn off COLUMN_STATS_ACCURATE for txnids' components in TBLS and PARTITIONS
++        prefix.append("select tbl_id from TBLS inner join DBS on TBLS.DB_ID = DBS.DB_ID "
++            + "inner join TXN_TO_WRITE_ID on t2w_database = DBS.NAME and t2w_table = TBLS.TBL_NAME"
++            + " and t2w_writeid = TBLS.WRITE_ID where ");
++        suffix.append("");
++        TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "t2w_txnid", true, false);
++
++        // Delete COLUMN_STATS_ACCURATE rows from TABLE_PARAMS for those txnids.
++        List<StringBuilder> finalCommands = new ArrayList<>(queries.size());
++        for (int i = 0; i < queries.size(); i++) {
++          String query = queries.get(i);
++          finalCommands.add(i, new StringBuilder("delete from TABLE_PARAMS " +
++                  " where param_key = '" + StatsSetupConst.COLUMN_STATS_ACCURATE + "' and tbl_id in ("));
++          finalCommands.get(i).append(query).append(")");
++          LOG.debug("Going to execute update <" + finalCommands.get(i) + ">");
++          int rc = stmt.executeUpdate(finalCommands.get(i).toString());
++          LOG.info("Removed " + rc + " COLUMN_STATS_ACCURATE entries from TABLE_PARAMS");
++        }
++
++        queries.clear();
++        prefix.setLength(0);
++        suffix.setLength(0);
++        finalCommands.clear();
++
++        // Delete COLUMN_STATS_ACCURATE rows from PARTITION_PARAMS for those txnids.
++        prefix.append("select part_id from PARTITIONS "
++            + "inner join TBLS on PARTITIONS.TBL_ID = TBLS.TBL_ID "
++            + "inner join DBS on TBLS.DB_ID = DBS.DB_ID "
++            + "inner join TXN_TO_WRITE_ID on t2w_database = DBS.NAME and t2w_table = TBLS.TBL_NAME"
++            + " and t2w_writeid = TBLS.WRITE_ID where ");
++        suffix.append("");
++        TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "t2w_txnid", true, false);
++
++        for (int i = 0; i < queries.size(); i++) {
++          String query = queries.get(i);
++          finalCommands.add(i, new StringBuilder("delete from PARTITION_PARAMS " +
++                  " where param_key = '" + StatsSetupConst.COLUMN_STATS_ACCURATE + "' and part_id in ("));
++          finalCommands.get(i).append(query).append(")");
++          LOG.debug("Going to execute update <" + finalCommands.get(i) + ">");
++          int rc = stmt.executeUpdate(finalCommands.get(i).toString());
++          LOG.info("Removed " + rc + " COLUMN_STATS_ACCURATE entries from PARTITION_PARAMS");
++        }
++
++        queries.clear();
++        prefix.setLength(0);
++        suffix.setLength(0);
++        finalCommands.clear();
++
++        // Delete from TXNS.
+         prefix.append("delete from TXNS where ");
+         suffix.append("");
+ 
+         TxnUtils.buildQueryWithINClause(conf, queries, prefix, suffix, txnids, "txn_id", false, false);
+ 
+         for (String query : queries) {
+           LOG.debug("Going to execute update <" + query + ">");
+           int rc = stmt.executeUpdate(query);
+           LOG.info("Removed " + rc + "  empty Aborted transactions from TXNS");
+         }
+         LOG.info("Aborted transactions removed from TXNS: " + txnids);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to delete from txns table " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "cleanEmptyAbortedTxns");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       cleanEmptyAbortedTxns();
+     }
+   }
+ 
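+   // The generated table-level stats cleanup above has this overall shape
+   // (hypothetical txn ids; illustrative only, not part of this patch):
+   //
+   //   delete from TABLE_PARAMS
+   //    where param_key = 'COLUMN_STATS_ACCURATE'
+   //      and tbl_id in (select tbl_id from TBLS
+   //                       inner join DBS on TBLS.DB_ID = DBS.DB_ID
+   //                       inner join TXN_TO_WRITE_ID on t2w_database = DBS.NAME
+   //                         and t2w_table = TBLS.TBL_NAME and t2w_writeid = TBLS.WRITE_ID
+   //                     where t2w_txnid in (1001, 1002))
+ 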
+   /**
+    * This will take all entries assigned to workers
+    * on a host and return them to the INITIATED state.  The initiator should use this at start up to
+    * clean entries from any workers that were in the middle of compacting when the metastore
+    * shut down.  It does not reset entries from worker threads on other hosts as those may still
+    * be working.
+    * @param hostname Name of this host.  It is assumed this prefixes the thread's worker id,
+    *                 so that a LIKE pattern of hostname% will match the worker id.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void revokeFromLocalWorkers(String hostname) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '"
+           + INITIATED_STATE+ "' where cq_state = '" + WORKING_STATE + "' and cq_worker_id like '"
+           +  hostname + "%'";
+         LOG.debug("Going to execute update <" + s + ">");
+         // It isn't an error if the following returns no rows, as the local workers could have died
+         // with nothing assigned to them.
+         stmt.executeUpdate(s);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to change dead worker's records back to initiated state " +
+           e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "revokeFromLocalWorkers(hostname:" + hostname +")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(stmt);
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       revokeFromLocalWorkers(hostname);
+     }
+   }
+ 
+   /**
+    * This call will return all compaction queue
+    * entries assigned to a worker but past the timeout back to the INITIATED state.
+    * This should be called by the initiator on start up and occasionally when running to clean up
+    * after dead threads.  At start up {@link #revokeFromLocalWorkers(String)} should be called
+    * first.
+    * @param timeout number of milliseconds since start time that should elapse before a worker is
+    *                declared dead.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void revokeTimedoutWorkers(long timeout) throws MetaException {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         long latestValidStart = getDbTime(dbConn) - timeout;
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set cq_worker_id = null, cq_start = null, cq_state = '"
+           + INITIATED_STATE+ "' where cq_state = '" + WORKING_STATE + "' and cq_start < "
+           +  latestValidStart;
+         LOG.debug("Going to execute update <" + s + ">");
+         // It isn't an error if the following returns no rows, as the workers could have died
+         // with nothing assigned to them.
+         stmt.executeUpdate(s);
+         LOG.debug("Going to commit");
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.error("Unable to change dead worker's records back to initiated state " +
+           e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "revokeTimedoutWorkers(timeout:" + timeout + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         closeStmt(stmt);
+         closeDbConn(dbConn);
+       }
+     } catch (RetryException e) {
+       revokeTimedoutWorkers(timeout);
+     }
+   }
+ 
+   /**
+    * Queries metastore DB directly to find columns in the table which have statistics information.
+    * If {@code ci} includes partition info then per partition stats info is examined, otherwise
+    * table level stats are examined.
+    * @throws MetaException
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException {
+     Connection dbConn = null;
+     PreparedStatement pStmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         String quote = getIdentifierQuoteString(dbConn);
+         StringBuilder bldr = new StringBuilder();
+         bldr.append("SELECT ").append(quote).append("COLUMN_NAME").append(quote)
+           .append(" FROM ")
+           .append(quote).append((ci.partName == null ? "TAB_COL_STATS" : "PART_COL_STATS"))
+           .append(quote)
+           .append(" WHERE ")
+           .append(quote).append("DB_NAME").append(quote).append(" = ?")
+           .append(" AND ").append(quote).append("TABLE_NAME").append(quote)
+           .append(" = ?");
+         if (ci.partName != null) {
+           bldr.append(" AND ").append(quote).append("PARTITION_NAME").append(quote).append(" = ?");
+         }
+         String s = bldr.toString();
+         pStmt = dbConn.prepareStatement(s);
+         pStmt.setString(1, ci.dbname);
+         pStmt.setString(2, ci.tableName);
+         if (ci.partName != null) {
+           pStmt.setString(3, ci.partName);
+         }
+ 
+       /*String s = "SELECT COLUMN_NAME FROM " + (ci.partName == null ? "TAB_COL_STATS" :
+           "PART_COL_STATS")
+          + " WHERE DB_NAME='" + ci.dbname + "' AND TABLE_NAME='" + ci.tableName + "'"
+         + (ci.partName == null ? "" : " AND PARTITION_NAME='" + ci.partName + "'");*/
+         LOG.debug("Going to execute <" + s + ">");
+         rs = pStmt.executeQuery();
+         List<String> columns = new ArrayList<>();
+         while (rs.next()) {
+           columns.add(rs.getString(1));
+         }
+         LOG.debug("Found columns to update stats: " + columns + " on " + ci.tableName +
+           (ci.partName == null ? "" : "/" + ci.partName));
+         dbConn.commit();
+         return columns;
+       } catch (SQLException e) {
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "findColumnsWithStats(" + ci.tableName +
+           (ci.partName == null ? "" : "/" + ci.partName) + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, pStmt, dbConn);
+       }
+     } catch (RetryException ex) {
+       return findColumnsWithStats(ci);
+     }
+   }
+ 
+   /**
+    * Record the highest write id that the {@code ci} compaction job will pay attention to.
+    * It is derived from the highest resolved txn id, i.e. a txn id such that there are no open txns with lower ids.
+    */
+   @Override
+   @RetrySemantics.Idempotent
+   public void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException {
+     Connection dbConn = null;
+     Statement stmt = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         int updCount = stmt.executeUpdate("UPDATE COMPACTION_QUEUE SET CQ_HIGHEST_WRITE_ID = " + highestWriteId +
+           " WHERE CQ_ID = " + ci.id);
+         if(updCount != 1) {
+           throw new IllegalStateException("Could not find record in COMPACTION_QUEUE for " + ci);
+         }
+         dbConn.commit();
+       } catch (SQLException e) {
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "setCompactionHighestWriteId(" + ci + "," + highestWriteId + ")");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(null, stmt, dbConn);
+       }
+     } catch (RetryException ex) {
+       setCompactionHighestWriteId(ci, highestWriteId);
+     }
+   }
+   private static class RetentionCounters {
+     int attemptedRetention = 0;
+     int failedRetention = 0;
+     int succeededRetention = 0;
+     RetentionCounters(int attemptedRetention, int failedRetention, int succeededRetention) {
+       this.attemptedRetention = attemptedRetention;
+       this.failedRetention = failedRetention;
+       this.succeededRetention = succeededRetention;
+     }
+   }
+   private void checkForDeletion(List<Long> deleteSet, CompactionInfo ci, RetentionCounters rc) {
+     switch (ci.state) {
+       case ATTEMPTED_STATE:
+         if(--rc.attemptedRetention < 0) {
+           deleteSet.add(ci.id);
+         }
+         break;
+       case FAILED_STATE:
+         if(--rc.failedRetention < 0) {
+           deleteSet.add(ci.id);
+         }
+         break;
+       case SUCCEEDED_STATE:
+         if(--rc.succeededRetention < 0) {
+           deleteSet.add(ci.id);
+         }
+         break;
+       default:
+         //do nothing to handle future RU/D where we may want to add new state types
+     }
+   }
+ 
+   /**
+    * For any given compactable entity (partition; table if not partitioned) the history of compactions
+    * may look like "sssfffaaasffss", for example.  The idea is to retain the tail (most recent) of the
+    * history such that a configurable number of each type of state is present.  Any other entries
+    * can be purged.  This scheme has advantage of always retaining the last failure/success even if
+    * it's not recent.
+    * @throws MetaException
+    */
+   @Override
+   @RetrySemantics.SafeToRetry
+   public void purgeCompactionHistory() throws MetaException {
+     Connection dbConn = null;
+     Statement stmt = null;
+     PreparedStatement pStmt = null;
+     ResultSet rs = null;
+     List<Long> deleteSet = new ArrayList<>();
+     RetentionCounters rc = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         /*cc_id is monotonically increasing, so for any entity this sorts in order of compaction history;
+         thus this query groups by entity and, within each group, sorts most recent first*/
+         rs = stmt.executeQuery("select cc_id, cc_database, cc_table, cc_partition, cc_state from " +
+           "COMPLETED_COMPACTIONS order by cc_database, cc_table, cc_partition, cc_id desc");
+         String lastCompactedEntity = null;
+         /*In each group, walk from most recent and count occurrences of each state type.  Once you
+         * have counted enough (for each state) to satisfy the retention policy, delete all other
+         * instances of this status.*/
+         while(rs.next()) {
+           CompactionInfo ci = new CompactionInfo(rs.getLong(1), rs.getString(2), rs.getString(3), rs.getString(4), rs.getString(5).charAt(0));
+           if(!ci.getFullPartitionName().equals(lastCompactedEntity)) {
+             lastCompactedEntity = ci.getFullPartitionName();
+             rc = new RetentionCounters(MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED),
+               getFailedCompactionRetention(),
+               MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_SUCCEEDED));
+           }
+           checkForDeletion(deleteSet, ci, rc);
+         }
+         close(rs);
+ 
+         if (deleteSet.size() <= 0) {
+           return;
+         }
+ 
+         List<String> queries = new ArrayList<>();
+ 
+         StringBuilder prefix = new StringBuilder();
+         StringBuilder suffix = new StringBuilder();
+ 
+         prefix.append("delete from COMPLETED_COMPACTIONS where ");
+         suffix.append("");
+ 
+         List<String> questions = new ArrayList<>(deleteSet.size());
+         for (int  i = 0; i < deleteSet.size(); i++) {
+           questions.add("?");
+         }
+         List<Integer> counts = TxnUtils.buildQueryWithINClauseStrings(conf, queries, prefix, suffix, questions, "cc_id", false, false);
+         int totalCount = 0;
+         for (int i = 0; i < queries.size(); i++) {
+           String query = queries.get(i);
+           long insertCount = counts.get(i);
+           LOG.debug("Going to execute update <" + query + ">");
+           pStmt = dbConn.prepareStatement(query);
+           for (int j = 0; j < insertCount; j++) {
+             pStmt.setLong(j + 1, deleteSet.get(totalCount + j));
+           }
+           totalCount += insertCount;
+           int count = pStmt.executeUpdate();
+           LOG.debug("Removed " + count + " records from COMPLETED_COMPACTIONS");
+         }
+         dbConn.commit();
+       } catch (SQLException e) {
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "purgeCompactionHistory()");
+         throw new MetaException("Unable to connect to transaction database " +
+           StringUtils.stringifyException(e));
+       } finally {
+         close(rs, stmt, dbConn);
+         closeStmt(pStmt);
+       }
+     } catch (RetryException ex) {
+       purgeCompactionHistory();
+     }
+   }
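+ 
+   // A worked example of the retention walk above (illustrative only, not part
+   // of this patch): with attempted/failed/succeeded retention all set to 2 and
+   // a per-entity history of "s s f f f a a s" (most recent first),
+   // checkForDeletion() keeps the first two of each state it encounters and
+   // marks the rest -- here the third 'f' and the trailing 's' -- for deletion:
+   //
+   //   RetentionCounters rc = new RetentionCounters(2, 2, 2);
+   //   for (CompactionInfo ci : historyNewestFirst) {
+   //     checkForDeletion(deleteSet, ci, rc);
+   //   }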
+   /**
+    * this ensures that the number of failed compaction entries retained is at least the failed
+    * compaction threshold beyond which new compactions stop being scheduled.
+    */
+   private int getFailedCompactionRetention() {
+     int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+     int failedRetention = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED);
+     if(failedRetention < failedThreshold) {
+       LOG.warn("Invalid configuration " + ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.getVarname() +
+         "=" + failedRetention + " < " + ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED + "=" +
+         failedRetention + ".  Will use " + ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD.getVarname() +
+         "=" + failedRetention);
+       failedRetention = failedThreshold;
+     }
+     return failedRetention;
+   }
+   /**
+    * Returns {@code true} if there already exists sufficient number of consecutive failures for
+    * this table/partition so that no new automatic compactions will be scheduled.
+    * User initiated compactions don't do this check.
+    *
+    * Do we allow compacting a whole table (when it's partitioned)?  No, though perhaps we should.
+    * That would be a meta operation, i.e. first find all partitions for this table (which have
+    * txn info) and schedule each compaction separately.  This avoids complications in this logic.
+    */
+   @Override
+   @RetrySemantics.ReadOnly
+   public boolean checkFailedCompactions(CompactionInfo ci) throws MetaException {
+     Connection dbConn = null;
+     PreparedStatement pStmt = null;
+     ResultSet rs = null;
+     try {
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         pStmt = dbConn.prepareStatement("select CC_STATE from COMPLETED_COMPACTIONS where " +
+           "CC_DATABASE = ? and " +
+           "CC_TABLE = ? " +
+           (ci.partName != null ? "and CC_PARTITION = ?" : "") +
+           " and CC_STATE != " + quoteChar(ATTEMPTED_STATE) + " order by CC_ID desc");
+         pStmt.setString(1, ci.dbname);
+         pStmt.setString(2, ci.tableName);
+         if (ci.partName != null) {
+           pStmt.setString(3, ci.partName);
+         }
+         rs = pStmt.executeQuery();
+         int numFailed = 0;
+         int numTotal = 0;
+         int failedThreshold = MetastoreConf.getIntVar(conf, ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD);
+         while(rs.next() && ++numTotal <= failedThreshold) {
+           if(rs.getString(1).charAt(0) == FAILED_STATE) {
+             numFailed++;
+           }
+           else {
+             numFailed--;
+           }
+         }
+         return numFailed == failedThreshold;
+       }
+       catch (SQLException e) {
+         LOG.error("Unable to check for failed compactions " + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         checkRetryable(dbConn, e, "checkFailedCompactions(" + ci + ")");
+         LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(e));
+         return false;//weren't able to check
+       } finally {
+         close(rs, pStmt, dbConn);
+       }
+     } catch (RetryException e) {
+       return checkFailedCompactions(ci);
+     }
+   }
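+ 
+   // A hedged illustration of the counting logic above (not part of this patch):
+   // numFailed is decremented on any non-failed row, so it can only reach
+   // failedThreshold when the most recent failedThreshold entries all failed.
+   //
+   //   threshold = 3, newest-first history f f f s ... -> numFailed == 3, blocked
+   //   threshold = 3, newest-first history f s f f ... -> numFailed == 1, allowed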
+   /**
+    * If there is an entry in compaction_queue with ci.id, remove it and make an entry in
+    * completed_compactions with status 'f' (FAILED_STATE).
+    * If there is no entry in compaction_queue, it means the Initiator failed to even schedule a compaction,
+    * which we record as an ATTEMPTED_STATE entry in history.
+    */
+   @Override
+   @RetrySemantics.CannotRetry
+   public void markFailed(CompactionInfo ci) throws MetaException {//todo: this should not throw
+     //todo: this should take "comment" as parameter to set in CC_META_INFO to provide some context for the failure
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       PreparedStatement pStmt = null;
+       ResultSet rs = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         pStmt = dbConn.prepareStatement("select CQ_ID, CQ_DATABASE, CQ_TABLE, CQ_PARTITION, CQ_STATE, CQ_TYPE, CQ_TBLPROPERTIES, CQ_WORKER_ID, CQ_START, CQ_RUN_AS, CQ_HIGHEST_WRITE_ID, CQ_META_INFO, CQ_HADOOP_JOB_ID from COMPACTION_QUEUE WHERE CQ_ID = ?");
+         pStmt.setLong(1, ci.id);
+         rs = pStmt.executeQuery();
+         if(rs.next()) {
+           ci = CompactionInfo.loadFullFromCompactionQueue(rs);
+           String s = "delete from COMPACTION_QUEUE where cq_id = ?";
+           pStmt = dbConn.prepareStatement(s);
+           pStmt.setLong(1, ci.id);
+           LOG.debug("Going to execute update <" + s + ">");
+           int updCnt = pStmt.executeUpdate();
+         }
+         else {
+           if(ci.id > 0) {
+             //the record with valid CQ_ID has disappeared - this is a sign of something wrong
+             throw new IllegalStateException("No record with CQ_ID=" + ci.id + " found in COMPACTION_QUEUE");
+           }
+         }
+         if(ci.id == 0) {
+           //The failure occurred before we even made an entry in COMPACTION_QUEUE
+           //generate ID so that we can make an entry in COMPLETED_COMPACTIONS
+           ci.id = generateCompactionQueueId(stmt);
+           //mostly this indicates that the Initiator is paying attention to some table even though
+           //compactions are not happening.
+           ci.state = ATTEMPTED_STATE;
+           //this is not strictly accurate, but 'type' cannot be null.
+           if(ci.type == null) { ci.type = CompactionType.MINOR; }
+           ci.start = getDbTime(dbConn);
+         }
+         else {
+           ci.state = FAILED_STATE;
+         }
+         close(rs, stmt, null);
+         closeStmt(pStmt);
+ 
+         pStmt = dbConn.prepareStatement("insert into COMPLETED_COMPACTIONS(CC_ID, CC_DATABASE, CC_TABLE, CC_PARTITION, CC_STATE, CC_TYPE, CC_TBLPROPERTIES, CC_WORKER_ID, CC_START, CC_END, CC_RUN_AS, CC_HIGHEST_WRITE_ID, CC_META_INFO, CC_HADOOP_JOB_ID) VALUES(?,?,?,?,?, ?,?,?,?,?, ?,?,?,?)");
+         CompactionInfo.insertIntoCompletedCompactions(pStmt, ci, getDbTime(dbConn));
+         int updCount = pStmt.executeUpdate();
+         LOG.debug("Going to commit");
+         closeStmt(pStmt);
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.warn("markFailed(" + ci.id + "):" + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         try {
+           checkRetryable(dbConn, e, "markFailed(" + ci + ")");
+         }
+         catch(MetaException ex) {
+           LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex));
+         }
+         LOG.error("markFailed(" + ci + ") failed: " + e.getMessage(), e);
+       } finally {
+         close(rs, stmt, null);
+         close(null, pStmt, dbConn);
+       }
+     } catch (RetryException e) {
+       markFailed(ci);
+     }
+   }
+   @Override
+   @RetrySemantics.Idempotent
+   public void setHadoopJobId(String hadoopJobId, long id) {
+     try {
+       Connection dbConn = null;
+       Statement stmt = null;
+       try {
+         dbConn = getDbConn(Connection.TRANSACTION_READ_COMMITTED);
+         stmt = dbConn.createStatement();
+         String s = "update COMPACTION_QUEUE set CQ_HADOOP_JOB_ID = " + quoteString(hadoopJobId) + " WHERE CQ_ID = " + id;
+         LOG.debug("Going to execute <" + s + ">");
+         stmt.executeUpdate(s);
+         LOG.debug("Going to commit");
+         closeStmt(stmt);
+         dbConn.commit();
+       } catch (SQLException e) {
+         LOG.warn("setHadoopJobId(" + hadoopJobId + "," + id + "):" + e.getMessage());
+         LOG.debug("Going to rollback");
+         rollbackDBConn(dbConn);
+         try {
+           checkRetryable(dbConn, e, "setHadoopJobId(" + hadoopJobId + "," + id + ")");
+         }
+         catch(MetaException ex) {
+           LOG.error("Unable to connect to transaction database " + StringUtils.stringifyException(ex));
+         }
+         LOG.error("setHadoopJobId(" + hadoopJobId + "," + id + ") failed: " + e.getMessage(), e);
+       } finally {
+         close(null, stmt, dbConn);
+       }
+     } catch (RetryException e) {
+       setHadoopJobId(hadoopJobId, id);
+     }
+   }
+ }
+ 
+ 
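
Both methods above share one retry idiom: do the JDBC work, and on a
SQLException roll back and call checkRetryable(), which throws RetryException
for transient failures; the outer catch then re-invokes the method. Below is a
minimal, self-contained sketch of that idiom; every name in it
(RetryIdiomSketch, jdbcWork, and the local checkRetryable/RetryException) is an
illustrative stand-in, not the TxnHandler API.

    import java.sql.SQLException;

    public class RetryIdiomSketch {
      private static class RetryException extends Exception { }

      private int attempts = 0;

      // Stand-in for TxnHandler.checkRetryable(): decide whether the failure
      // looks transient (deadlock, lost connection, ...) and, if so, signal a retry.
      private void checkRetryable(SQLException e) throws RetryException {
        if (attempts < 3) {           // cap the recursion depth
          attempts++;
          throw new RetryException();
        }
      }

      // Stand-in for the JDBC body of markFailed()/setHadoopJobId().
      private void jdbcWork() throws SQLException {
        // open a connection, execute statements, commit ...
      }

      public void doWork() {
        try {
          try {
            jdbcWork();
          } catch (SQLException e) {
            // a rollback would happen here, then:
            checkRetryable(e);
            // non-retryable: log and give up, matching the best-effort contract
          }
        } catch (RetryException e) {
          doWork();                   // recurse, exactly as the methods above do
        }
      }
    }

Note that the recursion is bounded only by checkRetryable() eventually
declining to throw, which is why the sketch caps the attempt count explicitly.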

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
index 0000000,f8c2ca2..319e612
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java
@@@ -1,0 -1,505 +1,599 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import java.sql.Connection;
+ import java.sql.Driver;
+ import java.sql.PreparedStatement;
+ import java.sql.ResultSet;
+ import java.sql.ResultSetMetaData;
+ import java.sql.SQLException;
+ import java.sql.SQLTransactionRollbackException;
+ import java.sql.Statement;
+ import java.util.Properties;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ /**
+  * Utility methods for creating and destroying txn database/schema, plus methods for
+  * querying against metastore tables.
+  * Placed here in a separate class so it can be shared across unit tests.
+  */
+ public final class TxnDbUtil {
+ 
+   private static final Logger LOG = LoggerFactory.getLogger(TxnDbUtil.class.getName());
+   private static final String TXN_MANAGER = "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager";
+ 
+   private static int deadlockCnt = 0;
+ 
+   private TxnDbUtil() {
+     throw new UnsupportedOperationException("Can't initialize class");
+   }
+ 
+   /**
+    * Set up the configuration so it will use the DbTxnManager, concurrency will be set to true,
+    * and the JDBC configs will be set for putting the transaction and lock info in the embedded
+    * metastore.
+    *
+    * @param conf HiveConf to add these values to
+    */
+   public static void setConfValues(Configuration conf) {
+     MetastoreConf.setVar(conf, ConfVars.HIVE_TXN_MANAGER, TXN_MANAGER);
+     MetastoreConf.setBoolVar(conf, ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
+   }
+ 
+   public static void prepDb(Configuration conf) throws Exception {
+     // This is a bogus hack because it copies the contents of the SQL file
+     // intended for creating Derby databases, and thus will inexorably get
+     // out of date with it.  I'm open to any suggestions on how to make this
+     // read the file in a build-friendly way.
+ 
+     Connection conn = null;
+     Statement stmt = null;
+     try {
+       conn = getConnection(conf);
+       stmt = conn.createStatement();
+       stmt.execute("CREATE TABLE TXNS (" +
+           "  TXN_ID bigint PRIMARY KEY," +
+           "  TXN_STATE char(1) NOT NULL," +
+           "  TXN_STARTED bigint NOT NULL," +
+           "  TXN_LAST_HEARTBEAT bigint NOT NULL," +
+           "  TXN_USER varchar(128) NOT NULL," +
+           "  TXN_HOST varchar(128) NOT NULL," +
+           "  TXN_TYPE integer)");
+ 
+       stmt.execute("CREATE TABLE TXN_COMPONENTS (" +
+           "  TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID)," +
+           "  TC_DATABASE varchar(128) NOT NULL," +
+           "  TC_TABLE varchar(128)," +
+           "  TC_PARTITION varchar(767)," +
+           "  TC_OPERATION_TYPE char(1) NOT NULL," +
+           "  TC_WRITEID bigint)");
+       stmt.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" +
+           "  CTC_TXNID bigint NOT NULL," +
+           "  CTC_DATABASE varchar(128) NOT NULL," +
+           "  CTC_TABLE varchar(128)," +
+           "  CTC_PARTITION varchar(767)," +
+           "  CTC_ID bigint GENERATED ALWAYS AS IDENTITY (START WITH 1, INCREMENT BY 1) NOT NULL," +
+           "  CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL," +
+           "  CTC_WRITEID bigint)");
+       stmt.execute("CREATE TABLE NEXT_TXN_ID (" + "  NTXN_NEXT bigint NOT NULL)");
+       stmt.execute("INSERT INTO NEXT_TXN_ID VALUES(1)");
+ 
+       stmt.execute("CREATE TABLE TXN_TO_WRITE_ID (" +
+           " T2W_TXNID bigint NOT NULL," +
+           " T2W_DATABASE varchar(128) NOT NULL," +
+           " T2W_TABLE varchar(256) NOT NULL," +
+           " T2W_WRITEID bigint NOT NULL)");
+       stmt.execute("CREATE TABLE NEXT_WRITE_ID (" +
+           " NWI_DATABASE varchar(128) NOT NULL," +
+           " NWI_TABLE varchar(256) NOT NULL," +
+           " NWI_NEXT bigint NOT NULL)");
+ 
+       stmt.execute("CREATE TABLE MIN_HISTORY_LEVEL (" +
+           " MHL_TXNID bigint NOT NULL," +
+           " MHL_MIN_OPEN_TXNID bigint NOT NULL," +
+           " PRIMARY KEY(MHL_TXNID))");
+ 
+       stmt.execute("CREATE TABLE HIVE_LOCKS (" +
+           " HL_LOCK_EXT_ID bigint NOT NULL," +
+           " HL_LOCK_INT_ID bigint NOT NULL," +
+           " HL_TXNID bigint NOT NULL," +
+           " HL_DB varchar(128) NOT NULL," +
+           " HL_TABLE varchar(128)," +
+           " HL_PARTITION varchar(767)," +
+           " HL_LOCK_STATE char(1) NOT NULL," +
+           " HL_LOCK_TYPE char(1) NOT NULL," +
+           " HL_LAST_HEARTBEAT bigint NOT NULL," +
+           " HL_ACQUIRED_AT bigint," +
+           " HL_USER varchar(128) NOT NULL," +
+           " HL_HOST varchar(128) NOT NULL," +
+           " HL_HEARTBEAT_COUNT integer," +
+           " HL_AGENT_INFO varchar(128)," +
+           " HL_BLOCKEDBY_EXT_ID bigint," +
+           " HL_BLOCKEDBY_INT_ID bigint," +
+         " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))");
+       stmt.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)");
+ 
+       stmt.execute("CREATE TABLE NEXT_LOCK_ID (" + " NL_NEXT bigint NOT NULL)");
+       stmt.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)");
+ 
+       stmt.execute("CREATE TABLE COMPACTION_QUEUE (" +
+           " CQ_ID bigint PRIMARY KEY," +
+           " CQ_DATABASE varchar(128) NOT NULL," +
+           " CQ_TABLE varchar(128) NOT NULL," +
+           " CQ_PARTITION varchar(767)," +
+           " CQ_STATE char(1) NOT NULL," +
+           " CQ_TYPE char(1) NOT NULL," +
+           " CQ_TBLPROPERTIES varchar(2048)," +
+           " CQ_WORKER_ID varchar(128)," +
+           " CQ_START bigint," +
+           " CQ_RUN_AS varchar(128)," +
+           " CQ_HIGHEST_WRITE_ID bigint," +
+           " CQ_META_INFO varchar(2048) for bit data," +
+           " CQ_HADOOP_JOB_ID varchar(32))");
+ 
+       stmt.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)");
+       stmt.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)");
+ 
+       stmt.execute("CREATE TABLE COMPLETED_COMPACTIONS (" +
+           " CC_ID bigint PRIMARY KEY," +
+           " CC_DATABASE varchar(128) NOT NULL," +
+           " CC_TABLE varchar(128) NOT NULL," +
+           " CC_PARTITION varchar(767)," +
+           " CC_STATE char(1) NOT NULL," +
+           " CC_TYPE char(1) NOT NULL," +
+           " CC_TBLPROPERTIES varchar(2048)," +
+           " CC_WORKER_ID varchar(128)," +
+           " CC_START bigint," +
+           " CC_END bigint," +
+           " CC_RUN_AS varchar(128)," +
+           " CC_HIGHEST_WRITE_ID bigint," +
+           " CC_META_INFO varchar(2048) for bit data," +
+           " CC_HADOOP_JOB_ID varchar(32))");
+ 
+       stmt.execute("CREATE TABLE AUX_TABLE (" +
+         " MT_KEY1 varchar(128) NOT NULL," +
+         " MT_KEY2 bigint NOT NULL," +
+         " MT_COMMENT varchar(255)," +
+         " PRIMARY KEY(MT_KEY1, MT_KEY2))");
+ 
+       stmt.execute("CREATE TABLE WRITE_SET (" +
+         " WS_DATABASE varchar(128) NOT NULL," +
+         " WS_TABLE varchar(128) NOT NULL," +
+         " WS_PARTITION varchar(767)," +
+         " WS_TXNID bigint NOT NULL," +
+         " WS_COMMIT_ID bigint NOT NULL," +
+         " WS_OPERATION_TYPE char(1) NOT NULL)"
+       );
+ 
+       stmt.execute("CREATE TABLE REPL_TXN_MAP (" +
+           " RTM_REPL_POLICY varchar(256) NOT NULL, " +
+           " RTM_SRC_TXN_ID bigint NOT NULL, " +
+           " RTM_TARGET_TXN_ID bigint NOT NULL, " +
+           " PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID))"
+       );
+ 
+       try {
++        stmt.execute("CREATE TABLE \"APP\".\"TBLS\" (\"TBL_ID\" BIGINT NOT NULL, " +
++            " \"CREATE_TIME\" INTEGER NOT NULL, \"DB_ID\" BIGINT, \"LAST_ACCESS_TIME\" INTEGER NOT NULL, " +
++            " \"OWNER\" VARCHAR(767), \"OWNER_TYPE\" VARCHAR(10), \"RETENTION\" INTEGER NOT NULL, " +
++            " \"SD_ID\" BIGINT, \"TBL_NAME\" VARCHAR(256), \"TBL_TYPE\" VARCHAR(128), " +
++            " \"VIEW_EXPANDED_TEXT\" LONG VARCHAR, \"VIEW_ORIGINAL_TEXT\" LONG VARCHAR, " +
++            " \"IS_REWRITE_ENABLED\" CHAR(1) NOT NULL DEFAULT \'N\', " +
++            " \"WRITE_ID\" BIGINT DEFAULT 0, " +
++            " PRIMARY KEY (TBL_ID))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("TBLS table already exist, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"PARTITIONS\" (" +
++            " \"PART_ID\" BIGINT NOT NULL, \"CREATE_TIME\" INTEGER NOT NULL, " +
++            " \"LAST_ACCESS_TIME\" INTEGER NOT NULL, \"PART_NAME\" VARCHAR(767), " +
++            " \"SD_ID\" BIGINT, \"TBL_ID\" BIGINT, " +
++            " \"WRITE_ID\" BIGINT DEFAULT 0, " +
++            " PRIMARY KEY (PART_ID))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("PARTITIONS table already exist, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"TABLE_PARAMS\" (" +
++            " \"TBL_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
++            " \"PARAM_VALUE\" CLOB, " +
++            " PRIMARY KEY (TBL_ID, PARAM_KEY))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("TABLE_PARAMS table already exist, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
++        stmt.execute("CREATE TABLE \"APP\".\"PARTITION_PARAMS\" (" +
++            " \"PART_ID\" BIGINT NOT NULL, \"PARAM_KEY\" VARCHAR(256) NOT NULL, " +
++            " \"PARAM_VALUE\" VARCHAR(4000), " +
++            " PRIMARY KEY (PART_ID, PARAM_KEY))"
++        );
++      } catch (SQLException e) {
++        if (e.getMessage() != null && e.getMessage().contains("already exists")) {
++          LOG.info("PARTITION_PARAMS table already exist, ignoring");
++        } else {
++          throw e;
++        }
++      }
++
++      try {
+         stmt.execute("CREATE TABLE \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\" VARCHAR(256) NOT " +
+ 
+                 "NULL, \"NEXT_VAL\" BIGINT NOT NULL)"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("SEQUENCE_TABLE table already exist, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       try {
+         stmt.execute("CREATE TABLE \"APP\".\"NOTIFICATION_SEQUENCE\" (\"NNI_ID\" BIGINT NOT NULL, " +
+ 
+                 "\"NEXT_EVENT_ID\" BIGINT NOT NULL)"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("NOTIFICATION_SEQUENCE table already exist, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       try {
+         stmt.execute("CREATE TABLE \"APP\".\"NOTIFICATION_LOG\" (\"NL_ID\" BIGINT NOT NULL, " +
+                 "\"DB_NAME\" VARCHAR(128), \"EVENT_ID\" BIGINT NOT NULL, \"EVENT_TIME\" INTEGER NOT" +
+ 
+                 " NULL, \"EVENT_TYPE\" VARCHAR(32) NOT NULL, \"MESSAGE\" CLOB, \"TBL_NAME\" " +
+                 "VARCHAR" +
+                 "(256), \"MESSAGE_FORMAT\" VARCHAR(16))"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("NOTIFICATION_LOG table already exist, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       stmt.execute("INSERT INTO \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") " +
+               "SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', " +
+               "1)) tmp_table WHERE NOT EXISTS ( SELECT \"NEXT_VAL\" FROM \"APP\"" +
+               ".\"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = 'org.apache.hadoop.hive.metastore" +
+               ".model.MNotificationLog')");
+ 
+       stmt.execute("INSERT INTO \"APP\".\"NOTIFICATION_SEQUENCE\" (\"NNI_ID\", \"NEXT_EVENT_ID\")" +
+               " SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT " +
+               "\"NEXT_EVENT_ID\" FROM \"APP\".\"NOTIFICATION_SEQUENCE\")");
+ 
+       try {
+         stmt.execute("CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (" +
+                 "WNL_ID bigint NOT NULL," +
+                 "WNL_TXNID bigint NOT NULL," +
+                 "WNL_WRITEID bigint NOT NULL," +
+                 "WNL_DATABASE varchar(128) NOT NULL," +
+                 "WNL_TABLE varchar(128) NOT NULL," +
+                 "WNL_PARTITION varchar(1024) NOT NULL," +
+                 "WNL_TABLE_OBJ clob NOT NULL," +
+                 "WNL_PARTITION_OBJ clob," +
+                 "WNL_FILES clob," +
+                 "WNL_EVENT_TIME integer NOT NULL," +
+                 "PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION))"
+         );
+       } catch (SQLException e) {
+         if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+           LOG.info("TXN_WRITE_NOTIFICATION_LOG table already exist, ignoring");
+         } else {
+           throw e;
+         }
+       }
+ 
+       stmt.execute("INSERT INTO \"APP\".\"SEQUENCE_TABLE\" (\"SEQUENCE_NAME\", \"NEXT_VAL\") " +
+               "SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', " +
+               "1)) tmp_table WHERE NOT EXISTS ( SELECT \"NEXT_VAL\" FROM \"APP\"" +
+               ".\"SEQUENCE_TABLE\" WHERE \"SEQUENCE_NAME\" = 'org.apache.hadoop.hive.metastore" +
+               ".model.MTxnWriteNotificationLog')");
+     } catch (SQLException e) {
+       try {
+         conn.rollback();
+       } catch (SQLException re) {
+         LOG.error("Error rolling back: " + re.getMessage());
+       }
+ 
+       // Another thread might have already created these tables.
+       if (e.getMessage() != null && e.getMessage().contains("already exists")) {
+         LOG.info("Txn tables already exist, returning");
+         return;
+       }
+ 
+       // This might be a deadlock, if so, let's retry
+       if (e instanceof SQLTransactionRollbackException && deadlockCnt++ < 5) {
+         LOG.warn("Caught deadlock, retrying db creation");
+         prepDb(conf);
+       } else {
+         throw e;
+       }
+     } finally {
+       deadlockCnt = 0;
+       closeResources(conn, stmt, null);
+     }
+   }
+ 
+   public static void cleanDb(Configuration conf) throws Exception {
+     int retryCount = 0;
+     while(++retryCount <= 3) {
+       boolean success = true;
+       Connection conn = null;
+       Statement stmt = null;
+       try {
+         conn = getConnection(conf);
+         stmt = conn.createStatement();
+ 
+         // We want to try these, whether they succeed or fail.
+         try {
+           stmt.execute("DROP INDEX HL_TXNID_INDEX");
+         } catch (SQLException e) {
+           if(!("42X65".equals(e.getSQLState()) && 30000 == e.getErrorCode())) {
+             //42X65/30000 means the index doesn't exist
+             LOG.error("Unable to drop index HL_TXNID_INDEX: " + e.getMessage() +
+               " State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
+             success = false;
+           }
+         }
+ 
+         success &= dropTable(stmt, "TXN_COMPONENTS", retryCount);
+         success &= dropTable(stmt, "COMPLETED_TXN_COMPONENTS", retryCount);
+         success &= dropTable(stmt, "TXNS", retryCount);
+         success &= dropTable(stmt, "NEXT_TXN_ID", retryCount);
+         success &= dropTable(stmt, "TXN_TO_WRITE_ID", retryCount);
+         success &= dropTable(stmt, "NEXT_WRITE_ID", retryCount);
+         success &= dropTable(stmt, "MIN_HISTORY_LEVEL", retryCount);
+         success &= dropTable(stmt, "HIVE_LOCKS", retryCount);
+         success &= dropTable(stmt, "NEXT_LOCK_ID", retryCount);
+         success &= dropTable(stmt, "COMPACTION_QUEUE", retryCount);
+         success &= dropTable(stmt, "NEXT_COMPACTION_QUEUE_ID", retryCount);
+         success &= dropTable(stmt, "COMPLETED_COMPACTIONS", retryCount);
+         success &= dropTable(stmt, "AUX_TABLE", retryCount);
+         success &= dropTable(stmt, "WRITE_SET", retryCount);
+         success &= dropTable(stmt, "REPL_TXN_MAP", retryCount);
+         /*
+          * Don't drop NOTIFICATION_LOG, SEQUENCE_TABLE and NOTIFICATION_SEQUENCE, as they are used by
+          * other, non-txn tables to generate primary keys. If these tables were dropped while the
+          * other tables were not, subsequent inserts into those tables would hit duplicate-key errors.
+          */
+       } finally {
+         closeResources(conn, stmt, null);
+       }
+       if(success) {
+         return;
+       }
+     }
+     throw new RuntimeException("Failed to clean up txn tables");
+   }
+ 
+   private static boolean dropTable(Statement stmt, String name, int retryCount) throws SQLException {
+     for (int i = 0; i < 3; i++) {
+       try {
+         stmt.execute("DROP TABLE " + name);
+         LOG.debug("Successfully dropped table " + name);
+         return true;
+       } catch (SQLException e) {
+         if ("42Y55".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
+           LOG.debug("Not dropping " + name + " because it doesn't exist");
+           //failed because object doesn't exist
+           return true;
+         }
+         if ("X0Y25".equals(e.getSQLState()) && 30000 == e.getErrorCode()) {
+           // Intermittent failure
+           LOG.warn("Intermittent drop failure, retrying, try number " + i);
+           continue;
+         }
+         LOG.error("Unable to drop table " + name + ": " + e.getMessage() +
+             " State=" + e.getSQLState() + " code=" + e.getErrorCode() + " retryCount=" + retryCount);
+       }
+     }
+     LOG.error("Failed to drop table, don't know why");
+     return false;
+   }
+ 
+   /**
+    * A tool to count the number of lock components (partitions, tables,
+    * and databases) locked by a particular lockId.
+    *
+    * @param conf configuration used to connect to the metastore database
+    * @param lockId lock id to look for lock components
+    *
+    * @return number of components, or 0 if there is no lock
+    */
+   public static int countLockComponents(Configuration conf, long lockId) throws Exception {
+     Connection conn = null;
+     PreparedStatement stmt = null;
+     ResultSet rs = null;
+     try {
+       conn = getConnection(conf);
+       stmt = conn.prepareStatement("SELECT count(*) FROM hive_locks WHERE hl_lock_ext_id = ?");
+       stmt.setLong(1, lockId);
+       rs = stmt.executeQuery();
+       if (!rs.next()) {
+         return 0;
+       }
+       return rs.getInt(1);
+     } finally {
+       closeResources(conn, stmt, rs);
+     }
+   }
+ 
+   /**
++   * Return true if the transaction with the given txnId is open or aborted,
++   * i.e. it still has a row in the TXNS table (committed transactions are removed).
++   * @param conf    HiveConf
++   * @param txnId   transaction id to search for
++   * @return true if the transaction is open or aborted, false otherwise
++   * @throws Exception if the metastore database cannot be queried
++   */
++  public static boolean isOpenOrAbortedTransaction(Configuration conf, long txnId) throws Exception {
++    Connection conn = null;
++    PreparedStatement stmt = null;
++    ResultSet rs = null;
++    try {
++      conn = getConnection(conf);
++      conn.setAutoCommit(false);
++      conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
++
++      stmt = conn.prepareStatement("SELECT txn_id FROM TXNS WHERE txn_id = ?");
++      stmt.setLong(1, txnId);
++      rs = stmt.executeQuery();
++      return rs.next();
++    } finally {
++      closeResources(conn, stmt, rs);
++    }
++  }
++
++  /**
+    * Utility method used to run COUNT queries like "select count(*) from ..." against metastore tables.
+    * @param conf configuration used to connect to the metastore database
+    * @param countQuery query text, e.g. "select count(*) from TXNS"
+    * @return the count returned by the query, or 0 if it returns no rows
+    * @throws Exception if the query fails
+    */
+   public static int countQueryAgent(Configuration conf, String countQuery) throws Exception {
+     Connection conn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     try {
+       conn = getConnection(conf);
+       stmt = conn.createStatement();
+       rs = stmt.executeQuery(countQuery);
+       if (!rs.next()) {
+         return 0;
+       }
+       return rs.getInt(1);
+     } finally {
+       closeResources(conn, stmt, rs);
+     }
+   }
+   @VisibleForTesting
+   public static String queryToString(Configuration conf, String query) throws Exception {
+     return queryToString(conf, query, true);
+   }
+   public static String queryToString(Configuration conf, String query, boolean includeHeader)
+       throws Exception {
+     Connection conn = null;
+     Statement stmt = null;
+     ResultSet rs = null;
+     StringBuilder sb = new StringBuilder();
+     try {
+       conn = getConnection(conf);
+       stmt = conn.createStatement();
+       rs = stmt.executeQuery(query);
+       ResultSetMetaData rsmd = rs.getMetaData();
+       if(includeHeader) {
+         for (int colPos = 1; colPos <= rsmd.getColumnCount(); colPos++) {
+           sb.append(rsmd.getColumnName(colPos)).append("   ");
+         }
+         sb.append('\n');
+       }
+       while(rs.next()) {
+         for (int colPos = 1; colPos <= rsmd.getColumnCount(); colPos++) {
+           sb.append(rs.getObject(colPos)).append("   ");
+         }
+         sb.append('\n');
+       }
+     } finally {
+       closeResources(conn, stmt, rs);
+     }
+     return sb.toString();
+   }
+ 
+   static Connection getConnection(Configuration conf) throws Exception {
+     String jdbcDriver = MetastoreConf.getVar(conf, ConfVars.CONNECTION_DRIVER);
+     Driver driver = (Driver) Class.forName(jdbcDriver).newInstance();
+     Properties prop = new Properties();
+     String driverUrl = MetastoreConf.getVar(conf, ConfVars.CONNECT_URL_KEY);
+     String user = MetastoreConf.getVar(conf, ConfVars.CONNECTION_USER_NAME);
+     String passwd = MetastoreConf.getPassword(conf, MetastoreConf.ConfVars.PWD);
+     prop.setProperty("user", user);
+     prop.setProperty("password", passwd);
+     Connection conn = driver.connect(driverUrl, prop);
+     conn.setAutoCommit(true);
+     return conn;
+   }
+ 
+   static void closeResources(Connection conn, Statement stmt, ResultSet rs) {
+     if (rs != null) {
+       try {
+         rs.close();
+       } catch (SQLException e) {
+         LOG.error("Error closing ResultSet: " + e.getMessage());
+       }
+     }
+ 
+     if (stmt != null) {
+       try {
+         stmt.close();
+       } catch (SQLException e) {
+         System.err.println("Error closing Statement: " + e.getMessage());
+       }
+     }
+ 
+     if (conn != null) {
+       try {
+         conn.rollback();
+       } catch (SQLException e) {
+         System.err.println("Error rolling back: " + e.getMessage());
+       }
+       try {
+         conn.close();
+       } catch (SQLException e) {
+         System.err.println("Error closing Connection: " + e.getMessage());
+       }
+     }
+   }
+ }
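
Taken together, the class above is meant to be driven in a simple test
lifecycle: configure, create the schema, query, tear down. The sketch below
assumes an embedded Derby metastore and that MetastoreConf.newMetastoreConf()
is available to build the Configuration; only the TxnDbUtil methods shown
above are relied on.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
    import org.apache.hadoop.hive.metastore.txn.TxnDbUtil;

    public class TxnDbUtilLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = MetastoreConf.newMetastoreConf();

        // Point the conf at DbTxnManager and enable concurrency support.
        TxnDbUtil.setConfValues(conf);

        // Create the txn tables in the embedded database.
        TxnDbUtil.prepDb(conf);
        try {
          // Count and dump helpers, useful for assertions in tests.
          int openTxns = TxnDbUtil.countQueryAgent(conf, "select count(*) from TXNS");
          System.out.println("open/aborted txns: " + openTxns);
          System.out.println(TxnDbUtil.queryToString(conf, "select * from NEXT_TXN_ID"));
        } finally {
          // Drop the txn tables so the next test starts from a clean slate.
          TxnDbUtil.cleanDb(conf);
        }
      }
    }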


[04/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RuntimeStat.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RuntimeStat.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RuntimeStat.java
new file mode 100644
index 0000000..b48718c
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RuntimeStat.java
@@ -0,0 +1,600 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class RuntimeStat implements org.apache.thrift.TBase<RuntimeStat, RuntimeStat._Fields>, java.io.Serializable, Cloneable, Comparable<RuntimeStat> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RuntimeStat");
+
+  private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField WEIGHT_FIELD_DESC = new org.apache.thrift.protocol.TField("weight", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField PAYLOAD_FIELD_DESC = new org.apache.thrift.protocol.TField("payload", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new RuntimeStatStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new RuntimeStatTupleSchemeFactory());
+  }
+
+  private int createTime; // optional
+  private int weight; // required
+  private ByteBuffer payload; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CREATE_TIME((short)1, "createTime"),
+    WEIGHT((short)2, "weight"),
+    PAYLOAD((short)3, "payload");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CREATE_TIME
+          return CREATE_TIME;
+        case 2: // WEIGHT
+          return WEIGHT;
+        case 3: // PAYLOAD
+          return PAYLOAD;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __CREATETIME_ISSET_ID = 0;
+  private static final int __WEIGHT_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.CREATE_TIME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.WEIGHT, new org.apache.thrift.meta_data.FieldMetaData("weight", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.PAYLOAD, new org.apache.thrift.meta_data.FieldMetaData("payload", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RuntimeStat.class, metaDataMap);
+  }
+
+  public RuntimeStat() {
+  }
+
+  public RuntimeStat(
+    int weight,
+    ByteBuffer payload)
+  {
+    this();
+    this.weight = weight;
+    setWeightIsSet(true);
+    this.payload = org.apache.thrift.TBaseHelper.copyBinary(payload);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public RuntimeStat(RuntimeStat other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.createTime = other.createTime;
+    this.weight = other.weight;
+    if (other.isSetPayload()) {
+      this.payload = org.apache.thrift.TBaseHelper.copyBinary(other.payload);
+    }
+  }
+
+  public RuntimeStat deepCopy() {
+    return new RuntimeStat(this);
+  }
+
+  @Override
+  public void clear() {
+    setCreateTimeIsSet(false);
+    this.createTime = 0;
+    setWeightIsSet(false);
+    this.weight = 0;
+    this.payload = null;
+  }
+
+  public int getCreateTime() {
+    return this.createTime;
+  }
+
+  public void setCreateTime(int createTime) {
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+  }
+
+  public void unsetCreateTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  /** Returns true if field createTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetCreateTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  public void setCreateTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
+  }
+
+  public int getWeight() {
+    return this.weight;
+  }
+
+  public void setWeight(int weight) {
+    this.weight = weight;
+    setWeightIsSet(true);
+  }
+
+  public void unsetWeight() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WEIGHT_ISSET_ID);
+  }
+
+  /** Returns true if field weight is set (has been assigned a value) and false otherwise */
+  public boolean isSetWeight() {
+    return EncodingUtils.testBit(__isset_bitfield, __WEIGHT_ISSET_ID);
+  }
+
+  public void setWeightIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WEIGHT_ISSET_ID, value);
+  }
+
+  public byte[] getPayload() {
+    setPayload(org.apache.thrift.TBaseHelper.rightSize(payload));
+    return payload == null ? null : payload.array();
+  }
+
+  public ByteBuffer bufferForPayload() {
+    return org.apache.thrift.TBaseHelper.copyBinary(payload);
+  }
+
+  public void setPayload(byte[] payload) {
+    this.payload = payload == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(payload, payload.length));
+  }
+
+  public void setPayload(ByteBuffer payload) {
+    this.payload = org.apache.thrift.TBaseHelper.copyBinary(payload);
+  }
+
+  public void unsetPayload() {
+    this.payload = null;
+  }
+
+  /** Returns true if field payload is set (has been assigned a value) and false otherwise */
+  public boolean isSetPayload() {
+    return this.payload != null;
+  }
+
+  public void setPayloadIsSet(boolean value) {
+    if (!value) {
+      this.payload = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CREATE_TIME:
+      if (value == null) {
+        unsetCreateTime();
+      } else {
+        setCreateTime((Integer)value);
+      }
+      break;
+
+    case WEIGHT:
+      if (value == null) {
+        unsetWeight();
+      } else {
+        setWeight((Integer)value);
+      }
+      break;
+
+    case PAYLOAD:
+      if (value == null) {
+        unsetPayload();
+      } else {
+        setPayload((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CREATE_TIME:
+      return getCreateTime();
+
+    case WEIGHT:
+      return getWeight();
+
+    case PAYLOAD:
+      return getPayload();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CREATE_TIME:
+      return isSetCreateTime();
+    case WEIGHT:
+      return isSetWeight();
+    case PAYLOAD:
+      return isSetPayload();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof RuntimeStat)
+      return this.equals((RuntimeStat)that);
+    return false;
+  }
+
+  public boolean equals(RuntimeStat that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_createTime = true && this.isSetCreateTime();
+    boolean that_present_createTime = true && that.isSetCreateTime();
+    if (this_present_createTime || that_present_createTime) {
+      if (!(this_present_createTime && that_present_createTime))
+        return false;
+      if (this.createTime != that.createTime)
+        return false;
+    }
+
+    boolean this_present_weight = true;
+    boolean that_present_weight = true;
+    if (this_present_weight || that_present_weight) {
+      if (!(this_present_weight && that_present_weight))
+        return false;
+      if (this.weight != that.weight)
+        return false;
+    }
+
+    boolean this_present_payload = true && this.isSetPayload();
+    boolean that_present_payload = true && that.isSetPayload();
+    if (this_present_payload || that_present_payload) {
+      if (!(this_present_payload && that_present_payload))
+        return false;
+      if (!this.payload.equals(that.payload))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_createTime = true && (isSetCreateTime());
+    list.add(present_createTime);
+    if (present_createTime)
+      list.add(createTime);
+
+    boolean present_weight = true;
+    list.add(present_weight);
+    if (present_weight)
+      list.add(weight);
+
+    boolean present_payload = true && (isSetPayload());
+    list.add(present_payload);
+    if (present_payload)
+      list.add(payload);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(RuntimeStat other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCreateTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetWeight()).compareTo(other.isSetWeight());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetWeight()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.weight, other.weight);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPayload()).compareTo(other.isSetPayload());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPayload()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.payload, other.payload);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("RuntimeStat(");
+    boolean first = true;
+
+    if (isSetCreateTime()) {
+      sb.append("createTime:");
+      sb.append(this.createTime);
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("weight:");
+    sb.append(this.weight);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("payload:");
+    if (this.payload == null) {
+      sb.append("null");
+    } else {
+      org.apache.thrift.TBaseHelper.toString(this.payload, sb);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetWeight()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'weight' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPayload()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'payload' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class RuntimeStatStandardSchemeFactory implements SchemeFactory {
+    public RuntimeStatStandardScheme getScheme() {
+      return new RuntimeStatStandardScheme();
+    }
+  }
+
+  private static class RuntimeStatStandardScheme extends StandardScheme<RuntimeStat> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, RuntimeStat struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CREATE_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.createTime = iprot.readI32();
+              struct.setCreateTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // WEIGHT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.weight = iprot.readI32();
+              struct.setWeightIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PAYLOAD
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.payload = iprot.readBinary();
+              struct.setPayloadIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, RuntimeStat struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.isSetCreateTime()) {
+        oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
+        oprot.writeI32(struct.createTime);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(WEIGHT_FIELD_DESC);
+      oprot.writeI32(struct.weight);
+      oprot.writeFieldEnd();
+      if (struct.payload != null) {
+        oprot.writeFieldBegin(PAYLOAD_FIELD_DESC);
+        oprot.writeBinary(struct.payload);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class RuntimeStatTupleSchemeFactory implements SchemeFactory {
+    public RuntimeStatTupleScheme getScheme() {
+      return new RuntimeStatTupleScheme();
+    }
+  }
+
+  private static class RuntimeStatTupleScheme extends TupleScheme<RuntimeStat> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, RuntimeStat struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI32(struct.weight);
+      oprot.writeBinary(struct.payload);
+      BitSet optionals = new BitSet();
+      if (struct.isSetCreateTime()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetCreateTime()) {
+        oprot.writeI32(struct.createTime);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, RuntimeStat struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.weight = iprot.readI32();
+      struct.setWeightIsSet(true);
+      struct.payload = iprot.readBinary();
+      struct.setPayloadIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.createTime = iprot.readI32();
+        struct.setCreateTimeIsSet(true);
+      }
+    }
+  }
+
+}
+
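
For reference, the generated bean above follows the usual Thrift pattern:
required fields (weight, payload) go through the convenience constructor, the
optional createTime through its setter, and validate() enforces the required
fields. A minimal usage sketch (the main() scaffolding is assumed; only the
RuntimeStat API shown above is relied on):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hive.metastore.api.RuntimeStat;

    public class RuntimeStatSketch {
      public static void main(String[] args) throws Exception {
        byte[] payload = "plan-stats".getBytes("UTF-8");

        // Required fields are set by the convenience constructor.
        RuntimeStat stat = new RuntimeStat(1, ByteBuffer.wrap(payload));

        // createTime is optional and only serialized once explicitly set.
        stat.setCreateTime((int) (System.currentTimeMillis() / 1000L));

        stat.validate();          // throws if weight or payload were unset
        System.out.println(stat); // toString() from the generated code
      }
    }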

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLCheckConstraint.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLCheckConstraint.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLCheckConstraint.java
new file mode 100644
index 0000000..9a5d328
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLCheckConstraint.java
@@ -0,0 +1,1213 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class SQLCheckConstraint implements org.apache.thrift.TBase<SQLCheckConstraint, SQLCheckConstraint._Fields>, java.io.Serializable, Cloneable, Comparable<SQLCheckConstraint> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLCheckConstraint");
+
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLE_DB_FIELD_DESC = new org.apache.thrift.protocol.TField("table_db", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField CHECK_EXPRESSION_FIELD_DESC = new org.apache.thrift.protocol.TField("check_expression", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField DC_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dc_name", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField ENABLE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("enable_cstr", org.apache.thrift.protocol.TType.BOOL, (short)7);
+  private static final org.apache.thrift.protocol.TField VALIDATE_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("validate_cstr", org.apache.thrift.protocol.TType.BOOL, (short)8);
+  private static final org.apache.thrift.protocol.TField RELY_CSTR_FIELD_DESC = new org.apache.thrift.protocol.TField("rely_cstr", org.apache.thrift.protocol.TType.BOOL, (short)9);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new SQLCheckConstraintStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new SQLCheckConstraintTupleSchemeFactory());
+  }
+
+  private String catName; // required
+  private String table_db; // required
+  private String table_name; // required
+  private String column_name; // required
+  private String check_expression; // required
+  private String dc_name; // required
+  private boolean enable_cstr; // required
+  private boolean validate_cstr; // required
+  private boolean rely_cstr; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CAT_NAME((short)1, "catName"),
+    TABLE_DB((short)2, "table_db"),
+    TABLE_NAME((short)3, "table_name"),
+    COLUMN_NAME((short)4, "column_name"),
+    CHECK_EXPRESSION((short)5, "check_expression"),
+    DC_NAME((short)6, "dc_name"),
+    ENABLE_CSTR((short)7, "enable_cstr"),
+    VALIDATE_CSTR((short)8, "validate_cstr"),
+    RELY_CSTR((short)9, "rely_cstr");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CAT_NAME
+          return CAT_NAME;
+        case 2: // TABLE_DB
+          return TABLE_DB;
+        case 3: // TABLE_NAME
+          return TABLE_NAME;
+        case 4: // COLUMN_NAME
+          return COLUMN_NAME;
+        case 5: // CHECK_EXPRESSION
+          return CHECK_EXPRESSION;
+        case 6: // DC_NAME
+          return DC_NAME;
+        case 7: // ENABLE_CSTR
+          return ENABLE_CSTR;
+        case 8: // VALIDATE_CSTR
+          return VALIDATE_CSTR;
+        case 9: // RELY_CSTR
+          return RELY_CSTR;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ENABLE_CSTR_ISSET_ID = 0;
+  private static final int __VALIDATE_CSTR_ISSET_ID = 1;
+  private static final int __RELY_CSTR_ISSET_ID = 2;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_DB, new org.apache.thrift.meta_data.FieldMetaData("table_db", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("column_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CHECK_EXPRESSION, new org.apache.thrift.meta_data.FieldMetaData("check_expression", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DC_NAME, new org.apache.thrift.meta_data.FieldMetaData("dc_name", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.ENABLE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("enable_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.VALIDATE_CSTR, new org.apache.thrift.meta_data.FieldMetaData("validate_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.RELY_CSTR, new org.apache.thrift.meta_data.FieldMetaData("rely_cstr", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLCheckConstraint.class, metaDataMap);
+  }
+
+  public SQLCheckConstraint() {
+  }
+
+  public SQLCheckConstraint(
+    String catName,
+    String table_db,
+    String table_name,
+    String column_name,
+    String check_expression,
+    String dc_name,
+    boolean enable_cstr,
+    boolean validate_cstr,
+    boolean rely_cstr)
+  {
+    this();
+    this.catName = catName;
+    this.table_db = table_db;
+    this.table_name = table_name;
+    this.column_name = column_name;
+    this.check_expression = check_expression;
+    this.dc_name = dc_name;
+    this.enable_cstr = enable_cstr;
+    setEnable_cstrIsSet(true);
+    this.validate_cstr = validate_cstr;
+    setValidate_cstrIsSet(true);
+    this.rely_cstr = rely_cstr;
+    setRely_cstrIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public SQLCheckConstraint(SQLCheckConstraint other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetTable_db()) {
+      this.table_db = other.table_db;
+    }
+    if (other.isSetTable_name()) {
+      this.table_name = other.table_name;
+    }
+    if (other.isSetColumn_name()) {
+      this.column_name = other.column_name;
+    }
+    if (other.isSetCheck_expression()) {
+      this.check_expression = other.check_expression;
+    }
+    if (other.isSetDc_name()) {
+      this.dc_name = other.dc_name;
+    }
+    this.enable_cstr = other.enable_cstr;
+    this.validate_cstr = other.validate_cstr;
+    this.rely_cstr = other.rely_cstr;
+  }
+
+  public SQLCheckConstraint deepCopy() {
+    return new SQLCheckConstraint(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catName = null;
+    this.table_db = null;
+    this.table_name = null;
+    this.column_name = null;
+    this.check_expression = null;
+    this.dc_name = null;
+    setEnable_cstrIsSet(false);
+    this.enable_cstr = false;
+    setValidate_cstrIsSet(false);
+    this.validate_cstr = false;
+    setRely_cstrIsSet(false);
+    this.rely_cstr = false;
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getTable_db() {
+    return this.table_db;
+  }
+
+  public void setTable_db(String table_db) {
+    this.table_db = table_db;
+  }
+
+  public void unsetTable_db() {
+    this.table_db = null;
+  }
+
+  /** Returns true if field table_db is set (has been assigned a value) and false otherwise */
+  public boolean isSetTable_db() {
+    return this.table_db != null;
+  }
+
+  public void setTable_dbIsSet(boolean value) {
+    if (!value) {
+      this.table_db = null;
+    }
+  }
+
+  public String getTable_name() {
+    return this.table_name;
+  }
+
+  public void setTable_name(String table_name) {
+    this.table_name = table_name;
+  }
+
+  public void unsetTable_name() {
+    this.table_name = null;
+  }
+
+  /** Returns true if field table_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetTable_name() {
+    return this.table_name != null;
+  }
+
+  public void setTable_nameIsSet(boolean value) {
+    if (!value) {
+      this.table_name = null;
+    }
+  }
+
+  public String getColumn_name() {
+    return this.column_name;
+  }
+
+  public void setColumn_name(String column_name) {
+    this.column_name = column_name;
+  }
+
+  public void unsetColumn_name() {
+    this.column_name = null;
+  }
+
+  /** Returns true if field column_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetColumn_name() {
+    return this.column_name != null;
+  }
+
+  public void setColumn_nameIsSet(boolean value) {
+    if (!value) {
+      this.column_name = null;
+    }
+  }
+
+  public String getCheck_expression() {
+    return this.check_expression;
+  }
+
+  public void setCheck_expression(String check_expression) {
+    this.check_expression = check_expression;
+  }
+
+  public void unsetCheck_expression() {
+    this.check_expression = null;
+  }
+
+  /** Returns true if field check_expression is set (has been assigned a value) and false otherwise */
+  public boolean isSetCheck_expression() {
+    return this.check_expression != null;
+  }
+
+  public void setCheck_expressionIsSet(boolean value) {
+    if (!value) {
+      this.check_expression = null;
+    }
+  }
+
+  public String getDc_name() {
+    return this.dc_name;
+  }
+
+  public void setDc_name(String dc_name) {
+    this.dc_name = dc_name;
+  }
+
+  public void unsetDc_name() {
+    this.dc_name = null;
+  }
+
+  /** Returns true if field dc_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetDc_name() {
+    return this.dc_name != null;
+  }
+
+  public void setDc_nameIsSet(boolean value) {
+    if (!value) {
+      this.dc_name = null;
+    }
+  }
+
+  public boolean isEnable_cstr() {
+    return this.enable_cstr;
+  }
+
+  public void setEnable_cstr(boolean enable_cstr) {
+    this.enable_cstr = enable_cstr;
+    setEnable_cstrIsSet(true);
+  }
+
+  public void unsetEnable_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field enable_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetEnable_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID);
+  }
+
+  public void setEnable_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ENABLE_CSTR_ISSET_ID, value);
+  }
+
+  public boolean isValidate_cstr() {
+    return this.validate_cstr;
+  }
+
+  public void setValidate_cstr(boolean validate_cstr) {
+    this.validate_cstr = validate_cstr;
+    setValidate_cstrIsSet(true);
+  }
+
+  public void unsetValidate_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field validate_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidate_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID);
+  }
+
+  public void setValidate_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __VALIDATE_CSTR_ISSET_ID, value);
+  }
+
+  public boolean isRely_cstr() {
+    return this.rely_cstr;
+  }
+
+  public void setRely_cstr(boolean rely_cstr) {
+    this.rely_cstr = rely_cstr;
+    setRely_cstrIsSet(true);
+  }
+
+  public void unsetRely_cstr() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RELY_CSTR_ISSET_ID);
+  }
+
+  /** Returns true if field rely_cstr is set (has been assigned a value) and false otherwise */
+  public boolean isSetRely_cstr() {
+    return EncodingUtils.testBit(__isset_bitfield, __RELY_CSTR_ISSET_ID);
+  }
+
+  public void setRely_cstrIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RELY_CSTR_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case TABLE_DB:
+      if (value == null) {
+        unsetTable_db();
+      } else {
+        setTable_db((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTable_name();
+      } else {
+        setTable_name((String)value);
+      }
+      break;
+
+    case COLUMN_NAME:
+      if (value == null) {
+        unsetColumn_name();
+      } else {
+        setColumn_name((String)value);
+      }
+      break;
+
+    case CHECK_EXPRESSION:
+      if (value == null) {
+        unsetCheck_expression();
+      } else {
+        setCheck_expression((String)value);
+      }
+      break;
+
+    case DC_NAME:
+      if (value == null) {
+        unsetDc_name();
+      } else {
+        setDc_name((String)value);
+      }
+      break;
+
+    case ENABLE_CSTR:
+      if (value == null) {
+        unsetEnable_cstr();
+      } else {
+        setEnable_cstr((Boolean)value);
+      }
+      break;
+
+    case VALIDATE_CSTR:
+      if (value == null) {
+        unsetValidate_cstr();
+      } else {
+        setValidate_cstr((Boolean)value);
+      }
+      break;
+
+    case RELY_CSTR:
+      if (value == null) {
+        unsetRely_cstr();
+      } else {
+        setRely_cstr((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CAT_NAME:
+      return getCatName();
+
+    case TABLE_DB:
+      return getTable_db();
+
+    case TABLE_NAME:
+      return getTable_name();
+
+    case COLUMN_NAME:
+      return getColumn_name();
+
+    case CHECK_EXPRESSION:
+      return getCheck_expression();
+
+    case DC_NAME:
+      return getDc_name();
+
+    case ENABLE_CSTR:
+      return isEnable_cstr();
+
+    case VALIDATE_CSTR:
+      return isValidate_cstr();
+
+    case RELY_CSTR:
+      return isRely_cstr();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CAT_NAME:
+      return isSetCatName();
+    case TABLE_DB:
+      return isSetTable_db();
+    case TABLE_NAME:
+      return isSetTable_name();
+    case COLUMN_NAME:
+      return isSetColumn_name();
+    case CHECK_EXPRESSION:
+      return isSetCheck_expression();
+    case DC_NAME:
+      return isSetDc_name();
+    case ENABLE_CSTR:
+      return isSetEnable_cstr();
+    case VALIDATE_CSTR:
+      return isSetValidate_cstr();
+    case RELY_CSTR:
+      return isSetRely_cstr();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof SQLCheckConstraint)
+      return this.equals((SQLCheckConstraint)that);
+    return false;
+  }
+
+  public boolean equals(SQLCheckConstraint that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_table_db = true && this.isSetTable_db();
+    boolean that_present_table_db = true && that.isSetTable_db();
+    if (this_present_table_db || that_present_table_db) {
+      if (!(this_present_table_db && that_present_table_db))
+        return false;
+      if (!this.table_db.equals(that.table_db))
+        return false;
+    }
+
+    boolean this_present_table_name = true && this.isSetTable_name();
+    boolean that_present_table_name = true && that.isSetTable_name();
+    if (this_present_table_name || that_present_table_name) {
+      if (!(this_present_table_name && that_present_table_name))
+        return false;
+      if (!this.table_name.equals(that.table_name))
+        return false;
+    }
+
+    boolean this_present_column_name = true && this.isSetColumn_name();
+    boolean that_present_column_name = true && that.isSetColumn_name();
+    if (this_present_column_name || that_present_column_name) {
+      if (!(this_present_column_name && that_present_column_name))
+        return false;
+      if (!this.column_name.equals(that.column_name))
+        return false;
+    }
+
+    boolean this_present_check_expression = true && this.isSetCheck_expression();
+    boolean that_present_check_expression = true && that.isSetCheck_expression();
+    if (this_present_check_expression || that_present_check_expression) {
+      if (!(this_present_check_expression && that_present_check_expression))
+        return false;
+      if (!this.check_expression.equals(that.check_expression))
+        return false;
+    }
+
+    boolean this_present_dc_name = true && this.isSetDc_name();
+    boolean that_present_dc_name = true && that.isSetDc_name();
+    if (this_present_dc_name || that_present_dc_name) {
+      if (!(this_present_dc_name && that_present_dc_name))
+        return false;
+      if (!this.dc_name.equals(that.dc_name))
+        return false;
+    }
+
+    boolean this_present_enable_cstr = true;
+    boolean that_present_enable_cstr = true;
+    if (this_present_enable_cstr || that_present_enable_cstr) {
+      if (!(this_present_enable_cstr && that_present_enable_cstr))
+        return false;
+      if (this.enable_cstr != that.enable_cstr)
+        return false;
+    }
+
+    boolean this_present_validate_cstr = true;
+    boolean that_present_validate_cstr = true;
+    if (this_present_validate_cstr || that_present_validate_cstr) {
+      if (!(this_present_validate_cstr && that_present_validate_cstr))
+        return false;
+      if (this.validate_cstr != that.validate_cstr)
+        return false;
+    }
+
+    boolean this_present_rely_cstr = true;
+    boolean that_present_rely_cstr = true;
+    if (this_present_rely_cstr || that_present_rely_cstr) {
+      if (!(this_present_rely_cstr && that_present_rely_cstr))
+        return false;
+      if (this.rely_cstr != that.rely_cstr)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_table_db = true && (isSetTable_db());
+    list.add(present_table_db);
+    if (present_table_db)
+      list.add(table_db);
+
+    boolean present_table_name = true && (isSetTable_name());
+    list.add(present_table_name);
+    if (present_table_name)
+      list.add(table_name);
+
+    boolean present_column_name = true && (isSetColumn_name());
+    list.add(present_column_name);
+    if (present_column_name)
+      list.add(column_name);
+
+    boolean present_check_expression = true && (isSetCheck_expression());
+    list.add(present_check_expression);
+    if (present_check_expression)
+      list.add(check_expression);
+
+    boolean present_dc_name = true && (isSetDc_name());
+    list.add(present_dc_name);
+    if (present_dc_name)
+      list.add(dc_name);
+
+    boolean present_enable_cstr = true;
+    list.add(present_enable_cstr);
+    if (present_enable_cstr)
+      list.add(enable_cstr);
+
+    boolean present_validate_cstr = true;
+    list.add(present_validate_cstr);
+    if (present_validate_cstr)
+      list.add(validate_cstr);
+
+    boolean present_rely_cstr = true;
+    list.add(present_rely_cstr);
+    if (present_rely_cstr)
+      list.add(rely_cstr);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(SQLCheckConstraint other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTable_db()).compareTo(other.isSetTable_db());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTable_db()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_db, other.table_db);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTable_name()).compareTo(other.isSetTable_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTable_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_name, other.table_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetColumn_name()).compareTo(other.isSetColumn_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColumn_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_name, other.column_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCheck_expression()).compareTo(other.isSetCheck_expression());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCheck_expression()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.check_expression, other.check_expression);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDc_name()).compareTo(other.isSetDc_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDc_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dc_name, other.dc_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetEnable_cstr()).compareTo(other.isSetEnable_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEnable_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.enable_cstr, other.enable_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetValidate_cstr()).compareTo(other.isSetValidate_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidate_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validate_cstr, other.validate_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRely_cstr()).compareTo(other.isSetRely_cstr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRely_cstr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rely_cstr, other.rely_cstr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("SQLCheckConstraint(");
+    boolean first = true;
+
+    sb.append("catName:");
+    if (this.catName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("table_db:");
+    if (this.table_db == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.table_db);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("table_name:");
+    if (this.table_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.table_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("column_name:");
+    if (this.column_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.column_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("check_expression:");
+    if (this.check_expression == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.check_expression);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dc_name:");
+    if (this.dc_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dc_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("enable_cstr:");
+    sb.append(this.enable_cstr);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("validate_cstr:");
+    sb.append(this.validate_cstr);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("rely_cstr:");
+    sb.append(this.rely_cstr);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class SQLCheckConstraintStandardSchemeFactory implements SchemeFactory {
+    public SQLCheckConstraintStandardScheme getScheme() {
+      return new SQLCheckConstraintStandardScheme();
+    }
+  }
+
+  private static class SQLCheckConstraintStandardScheme extends StandardScheme<SQLCheckConstraint> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, SQLCheckConstraint struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TABLE_DB
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.table_db = iprot.readString();
+              struct.setTable_dbIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.table_name = iprot.readString();
+              struct.setTable_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // COLUMN_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.column_name = iprot.readString();
+              struct.setColumn_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // CHECK_EXPRESSION
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.check_expression = iprot.readString();
+              struct.setCheck_expressionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // DC_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dc_name = iprot.readString();
+              struct.setDc_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // ENABLE_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.enable_cstr = iprot.readBool();
+              struct.setEnable_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // VALIDATE_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.validate_cstr = iprot.readBool();
+              struct.setValidate_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 9: // RELY_CSTR
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.rely_cstr = iprot.readBool();
+              struct.setRely_cstrIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, SQLCheckConstraint struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catName != null) {
+        oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+        oprot.writeString(struct.catName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.table_db != null) {
+        oprot.writeFieldBegin(TABLE_DB_FIELD_DESC);
+        oprot.writeString(struct.table_db);
+        oprot.writeFieldEnd();
+      }
+      if (struct.table_name != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.table_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.column_name != null) {
+        oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC);
+        oprot.writeString(struct.column_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.check_expression != null) {
+        oprot.writeFieldBegin(CHECK_EXPRESSION_FIELD_DESC);
+        oprot.writeString(struct.check_expression);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dc_name != null) {
+        oprot.writeFieldBegin(DC_NAME_FIELD_DESC);
+        oprot.writeString(struct.dc_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(ENABLE_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.enable_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(VALIDATE_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.validate_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(RELY_CSTR_FIELD_DESC);
+      oprot.writeBool(struct.rely_cstr);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class SQLCheckConstraintTupleSchemeFactory implements SchemeFactory {
+    public SQLCheckConstraintTupleScheme getScheme() {
+      return new SQLCheckConstraintTupleScheme();
+    }
+  }
+
+  private static class SQLCheckConstraintTupleScheme extends TupleScheme<SQLCheckConstraint> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, SQLCheckConstraint struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetCatName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTable_db()) {
+        optionals.set(1);
+      }
+      if (struct.isSetTable_name()) {
+        optionals.set(2);
+      }
+      if (struct.isSetColumn_name()) {
+        optionals.set(3);
+      }
+      if (struct.isSetCheck_expression()) {
+        optionals.set(4);
+      }
+      if (struct.isSetDc_name()) {
+        optionals.set(5);
+      }
+      if (struct.isSetEnable_cstr()) {
+        optionals.set(6);
+      }
+      if (struct.isSetValidate_cstr()) {
+        optionals.set(7);
+      }
+      if (struct.isSetRely_cstr()) {
+        optionals.set(8);
+      }
+      oprot.writeBitSet(optionals, 9);
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+      if (struct.isSetTable_db()) {
+        oprot.writeString(struct.table_db);
+      }
+      if (struct.isSetTable_name()) {
+        oprot.writeString(struct.table_name);
+      }
+      if (struct.isSetColumn_name()) {
+        oprot.writeString(struct.column_name);
+      }
+      if (struct.isSetCheck_expression()) {
+        oprot.writeString(struct.check_expression);
+      }
+      if (struct.isSetDc_name()) {
+        oprot.writeString(struct.dc_name);
+      }
+      if (struct.isSetEnable_cstr()) {
+        oprot.writeBool(struct.enable_cstr);
+      }
+      if (struct.isSetValidate_cstr()) {
+        oprot.writeBool(struct.validate_cstr);
+      }
+      if (struct.isSetRely_cstr()) {
+        oprot.writeBool(struct.rely_cstr);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, SQLCheckConstraint struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(9);
+      if (incoming.get(0)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.table_db = iprot.readString();
+        struct.setTable_dbIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.table_name = iprot.readString();
+        struct.setTable_nameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.column_name = iprot.readString();
+        struct.setColumn_nameIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.check_expression = iprot.readString();
+        struct.setCheck_expressionIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.dc_name = iprot.readString();
+        struct.setDc_nameIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.enable_cstr = iprot.readBool();
+        struct.setEnable_cstrIsSet(true);
+      }
+      if (incoming.get(7)) {
+        struct.validate_cstr = iprot.readBool();
+        struct.setValidate_cstrIsSet(true);
+      }
+      if (incoming.get(8)) {
+        struct.rely_cstr = iprot.readBool();
+        struct.setRely_cstrIsSet(true);
+      }
+    }
+  }
+
+}
+
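For reference, a minimal sketch (not part of this commit) of how the generated
SQLCheckConstraint bean above might be round-tripped through Thrift's compact
protocol. The catalog/table/column values are made up for illustration, and it
assumes libthrift 0.9.x plus the generated classes are on the classpath:

    import org.apache.thrift.TSerializer;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.hadoop.hive.metastore.api.SQLCheckConstraint;

    public class SQLCheckConstraintRoundTrip {
      public static void main(String[] args) throws Exception {
        // Constructor argument order mirrors the Thrift IDL fields: catName,
        // table_db, table_name, column_name, check_expression, dc_name,
        // enable_cstr, validate_cstr, rely_cstr. The three booleans are also
        // marked "set" in the __isset_bitfield by this constructor.
        SQLCheckConstraint c = new SQLCheckConstraint(
            "hive", "default", "orders", "quantity",
            "quantity > 0", "quantity_positive_chk",
            true,   // enable_cstr
            false,  // validate_cstr
            true);  // rely_cstr

        // Serialize with the compact protocol, then deserialize into a fresh
        // instance; the generated read()/write() methods do the field work.
        byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(c);
        SQLCheckConstraint copy = new SQLCheckConstraint();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

        // equals() compares every set field, so the round trip must be lossless.
        if (!c.equals(copy)) {
          throw new AssertionError("round trip lost data");
        }
      }
    }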


[45/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
new file mode 100644
index 0000000..f5913fc
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.h
@@ -0,0 +1,14125 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#ifndef hive_metastore_TYPES_H
+#define hive_metastore_TYPES_H
+
+#include <iosfwd>
+
+#include <thrift/Thrift.h>
+#include <thrift/TApplicationException.h>
+#include <thrift/protocol/TProtocol.h>
+#include <thrift/transport/TTransport.h>
+
+#include <thrift/cxxfunctional.h>
+#include "fb303_types.h"
+
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+struct HiveObjectType {
+  enum type {
+    GLOBAL = 1,
+    DATABASE = 2,
+    TABLE = 3,
+    PARTITION = 4,
+    COLUMN = 5
+  };
+};
+
+extern const std::map<int, const char*> _HiveObjectType_VALUES_TO_NAMES;
+
+struct PrincipalType {
+  enum type {
+    USER = 1,
+    ROLE = 2,
+    GROUP = 3
+  };
+};
+
+extern const std::map<int, const char*> _PrincipalType_VALUES_TO_NAMES;
+
+struct PartitionEventType {
+  enum type {
+    LOAD_DONE = 1
+  };
+};
+
+extern const std::map<int, const char*> _PartitionEventType_VALUES_TO_NAMES;
+
+struct TxnState {
+  enum type {
+    COMMITTED = 1,
+    ABORTED = 2,
+    OPEN = 3
+  };
+};
+
+extern const std::map<int, const char*> _TxnState_VALUES_TO_NAMES;
+
+struct LockLevel {
+  enum type {
+    DB = 1,
+    TABLE = 2,
+    PARTITION = 3
+  };
+};
+
+extern const std::map<int, const char*> _LockLevel_VALUES_TO_NAMES;
+
+struct LockState {
+  enum type {
+    ACQUIRED = 1,
+    WAITING = 2,
+    ABORT = 3,
+    NOT_ACQUIRED = 4
+  };
+};
+
+extern const std::map<int, const char*> _LockState_VALUES_TO_NAMES;
+
+struct LockType {
+  enum type {
+    SHARED_READ = 1,
+    SHARED_WRITE = 2,
+    EXCLUSIVE = 3
+  };
+};
+
+extern const std::map<int, const char*> _LockType_VALUES_TO_NAMES;
+
+struct CompactionType {
+  enum type {
+    MINOR = 1,
+    MAJOR = 2
+  };
+};
+
+extern const std::map<int, const char*> _CompactionType_VALUES_TO_NAMES;
+
+struct GrantRevokeType {
+  enum type {
+    GRANT = 1,
+    REVOKE = 2
+  };
+};
+
+extern const std::map<int, const char*> _GrantRevokeType_VALUES_TO_NAMES;
+
+struct DataOperationType {
+  enum type {
+    SELECT = 1,
+    INSERT = 2,
+    UPDATE = 3,
+    DELETE = 4,
+    UNSET = 5,
+    NO_TXN = 6
+  };
+};
+
+extern const std::map<int, const char*> _DataOperationType_VALUES_TO_NAMES;
+
+struct EventRequestType {
+  enum type {
+    INSERT = 1,
+    UPDATE = 2,
+    DELETE = 3
+  };
+};
+
+extern const std::map<int, const char*> _EventRequestType_VALUES_TO_NAMES;
+
+struct SerdeType {
+  enum type {
+    HIVE = 1,
+    SCHEMA_REGISTRY = 2
+  };
+};
+
+extern const std::map<int, const char*> _SerdeType_VALUES_TO_NAMES;
+
+struct SchemaType {
+  enum type {
+    HIVE = 1,
+    AVRO = 2
+  };
+};
+
+extern const std::map<int, const char*> _SchemaType_VALUES_TO_NAMES;
+
+struct SchemaCompatibility {
+  enum type {
+    NONE = 1,
+    BACKWARD = 2,
+    FORWARD = 3,
+    BOTH = 4
+  };
+};
+
+extern const std::map<int, const char*> _SchemaCompatibility_VALUES_TO_NAMES;
+
+struct SchemaValidation {
+  enum type {
+    LATEST = 1,
+    ALL = 2
+  };
+};
+
+extern const std::map<int, const char*> _SchemaValidation_VALUES_TO_NAMES;
+
+struct SchemaVersionState {
+  enum type {
+    INITIATED = 1,
+    START_REVIEW = 2,
+    CHANGES_REQUIRED = 3,
+    REVIEWED = 4,
+    ENABLED = 5,
+    DISABLED = 6,
+    ARCHIVED = 7,
+    DELETED = 8
+  };
+};
+
+extern const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES;
+
+struct FunctionType {
+  enum type {
+    JAVA = 1
+  };
+};
+
+extern const std::map<int, const char*> _FunctionType_VALUES_TO_NAMES;
+
+struct ResourceType {
+  enum type {
+    JAR = 1,
+    FILE = 2,
+    ARCHIVE = 3
+  };
+};
+
+extern const std::map<int, const char*> _ResourceType_VALUES_TO_NAMES;
+
+struct FileMetadataExprType {
+  enum type {
+    ORC_SARG = 1
+  };
+};
+
+extern const std::map<int, const char*> _FileMetadataExprType_VALUES_TO_NAMES;
+
+struct ClientCapability {
+  enum type {
+    TEST_CAPABILITY = 1,
+    INSERT_ONLY_TABLES = 2
+  };
+};
+
+extern const std::map<int, const char*> _ClientCapability_VALUES_TO_NAMES;
+
+struct WMResourcePlanStatus {
+  enum type {
+    ACTIVE = 1,
+    ENABLED = 2,
+    DISABLED = 3
+  };
+};
+
+extern const std::map<int, const char*> _WMResourcePlanStatus_VALUES_TO_NAMES;
+
+struct WMPoolSchedulingPolicy {
+  enum type {
+    FAIR = 1,
+    FIFO = 2
+  };
+};
+
+extern const std::map<int, const char*> _WMPoolSchedulingPolicy_VALUES_TO_NAMES;
+
+class Version;
+
+class FieldSchema;
+
+class SQLPrimaryKey;
+
+class SQLForeignKey;
+
+class SQLUniqueConstraint;
+
+class SQLNotNullConstraint;
+
+class SQLDefaultConstraint;
+
+class SQLCheckConstraint;
+
+class Type;
+
+class HiveObjectRef;
+
+class PrivilegeGrantInfo;
+
+class HiveObjectPrivilege;
+
+class PrivilegeBag;
+
+class PrincipalPrivilegeSet;
+
+class GrantRevokePrivilegeRequest;
+
+class GrantRevokePrivilegeResponse;
+
+class Role;
+
+class RolePrincipalGrant;
+
+class GetRoleGrantsForPrincipalRequest;
+
+class GetRoleGrantsForPrincipalResponse;
+
+class GetPrincipalsInRoleRequest;
+
+class GetPrincipalsInRoleResponse;
+
+class GrantRevokeRoleRequest;
+
+class GrantRevokeRoleResponse;
+
+class Catalog;
+
+class CreateCatalogRequest;
+
+class AlterCatalogRequest;
+
+class GetCatalogRequest;
+
+class GetCatalogResponse;
+
+class GetCatalogsResponse;
+
+class DropCatalogRequest;
+
+class Database;
+
+class SerDeInfo;
+
+class Order;
+
+class SkewedInfo;
+
+class StorageDescriptor;
+
+class Table;
+
+class Partition;
+
+class PartitionWithoutSD;
+
+class PartitionSpecWithSharedSD;
+
+class PartitionListComposingSpec;
+
+class PartitionSpec;
+
+class BooleanColumnStatsData;
+
+class DoubleColumnStatsData;
+
+class LongColumnStatsData;
+
+class StringColumnStatsData;
+
+class BinaryColumnStatsData;
+
+class Decimal;
+
+class DecimalColumnStatsData;
+
+class Date;
+
+class DateColumnStatsData;
+
+class ColumnStatisticsData;
+
+class ColumnStatisticsObj;
+
+class ColumnStatisticsDesc;
+
+class ColumnStatistics;
+
+class AggrStats;
+
+class SetPartitionsStatsRequest;
+
+class Schema;
+
+class EnvironmentContext;
+
+class PrimaryKeysRequest;
+
+class PrimaryKeysResponse;
+
+class ForeignKeysRequest;
+
+class ForeignKeysResponse;
+
+class UniqueConstraintsRequest;
+
+class UniqueConstraintsResponse;
+
+class NotNullConstraintsRequest;
+
+class NotNullConstraintsResponse;
+
+class DefaultConstraintsRequest;
+
+class DefaultConstraintsResponse;
+
+class CheckConstraintsRequest;
+
+class CheckConstraintsResponse;
+
+class DropConstraintRequest;
+
+class AddPrimaryKeyRequest;
+
+class AddForeignKeyRequest;
+
+class AddUniqueConstraintRequest;
+
+class AddNotNullConstraintRequest;
+
+class AddDefaultConstraintRequest;
+
+class AddCheckConstraintRequest;
+
+class PartitionsByExprResult;
+
+class PartitionsByExprRequest;
+
+class TableStatsResult;
+
+class PartitionsStatsResult;
+
+class TableStatsRequest;
+
+class PartitionsStatsRequest;
+
+class AddPartitionsResult;
+
+class AddPartitionsRequest;
+
+class DropPartitionsResult;
+
+class DropPartitionsExpr;
+
+class RequestPartsSpec;
+
+class DropPartitionsRequest;
+
+class PartitionValuesRequest;
+
+class PartitionValuesRow;
+
+class PartitionValuesResponse;
+
+class ResourceUri;
+
+class Function;
+
+class TxnInfo;
+
+class GetOpenTxnsInfoResponse;
+
+class GetOpenTxnsResponse;
+
+class OpenTxnRequest;
+
+class OpenTxnsResponse;
+
+class AbortTxnRequest;
+
+class AbortTxnsRequest;
+
+class CommitTxnRequest;
+
+class WriteEventInfo;
+
+class ReplTblWriteIdStateRequest;
+
+class GetValidWriteIdsRequest;
+
+class TableValidWriteIds;
+
+class GetValidWriteIdsResponse;
+
+class AllocateTableWriteIdsRequest;
+
+class TxnToWriteId;
+
+class AllocateTableWriteIdsResponse;
+
+class LockComponent;
+
+class LockRequest;
+
+class LockResponse;
+
+class CheckLockRequest;
+
+class UnlockRequest;
+
+class ShowLocksRequest;
+
+class ShowLocksResponseElement;
+
+class ShowLocksResponse;
+
+class HeartbeatRequest;
+
+class HeartbeatTxnRangeRequest;
+
+class HeartbeatTxnRangeResponse;
+
+class CompactionRequest;
+
+class CompactionResponse;
+
+class ShowCompactRequest;
+
+class ShowCompactResponseElement;
+
+class ShowCompactResponse;
+
+class AddDynamicPartitions;
+
+class BasicTxnInfo;
+
+class CreationMetadata;
+
+class NotificationEventRequest;
+
+class NotificationEvent;
+
+class NotificationEventResponse;
+
+class CurrentNotificationEventId;
+
+class NotificationEventsCountRequest;
+
+class NotificationEventsCountResponse;
+
+class InsertEventRequestData;
+
+class FireEventRequestData;
+
+class FireEventRequest;
+
+class FireEventResponse;
+
+class WriteNotificationLogRequest;
+
+class WriteNotificationLogResponse;
+
+class MetadataPpdResult;
+
+class GetFileMetadataByExprResult;
+
+class GetFileMetadataByExprRequest;
+
+class GetFileMetadataResult;
+
+class GetFileMetadataRequest;
+
+class PutFileMetadataResult;
+
+class PutFileMetadataRequest;
+
+class ClearFileMetadataResult;
+
+class ClearFileMetadataRequest;
+
+class CacheFileMetadataResult;
+
+class CacheFileMetadataRequest;
+
+class GetAllFunctionsResponse;
+
+class ClientCapabilities;
+
+class GetTableRequest;
+
+class GetTableResult;
+
+class GetTablesRequest;
+
+class GetTablesResult;
+
+class CmRecycleRequest;
+
+class CmRecycleResponse;
+
+class TableMeta;
+
+class Materialization;
+
+class WMResourcePlan;
+
+class WMNullableResourcePlan;
+
+class WMPool;
+
+class WMNullablePool;
+
+class WMTrigger;
+
+class WMMapping;
+
+class WMPoolTrigger;
+
+class WMFullResourcePlan;
+
+class WMCreateResourcePlanRequest;
+
+class WMCreateResourcePlanResponse;
+
+class WMGetActiveResourcePlanRequest;
+
+class WMGetActiveResourcePlanResponse;
+
+class WMGetResourcePlanRequest;
+
+class WMGetResourcePlanResponse;
+
+class WMGetAllResourcePlanRequest;
+
+class WMGetAllResourcePlanResponse;
+
+class WMAlterResourcePlanRequest;
+
+class WMAlterResourcePlanResponse;
+
+class WMValidateResourcePlanRequest;
+
+class WMValidateResourcePlanResponse;
+
+class WMDropResourcePlanRequest;
+
+class WMDropResourcePlanResponse;
+
+class WMCreateTriggerRequest;
+
+class WMCreateTriggerResponse;
+
+class WMAlterTriggerRequest;
+
+class WMAlterTriggerResponse;
+
+class WMDropTriggerRequest;
+
+class WMDropTriggerResponse;
+
+class WMGetTriggersForResourePlanRequest;
+
+class WMGetTriggersForResourePlanResponse;
+
+class WMCreatePoolRequest;
+
+class WMCreatePoolResponse;
+
+class WMAlterPoolRequest;
+
+class WMAlterPoolResponse;
+
+class WMDropPoolRequest;
+
+class WMDropPoolResponse;
+
+class WMCreateOrUpdateMappingRequest;
+
+class WMCreateOrUpdateMappingResponse;
+
+class WMDropMappingRequest;
+
+class WMDropMappingResponse;
+
+class WMCreateOrDropTriggerToPoolMappingRequest;
+
+class WMCreateOrDropTriggerToPoolMappingResponse;
+
+class ISchema;
+
+class ISchemaName;
+
+class AlterISchemaRequest;
+
+class SchemaVersion;
+
+class SchemaVersionDescriptor;
+
+class FindSchemasByColsRqst;
+
+class FindSchemasByColsResp;
+
+class MapSchemaVersionToSerdeRequest;
+
+class SetSchemaVersionStateRequest;
+
+class GetSerdeRequest;
+
+class RuntimeStat;
+
+class GetRuntimeStatsRequest;
+
+class MetaException;
+
+class UnknownTableException;
+
+class UnknownDBException;
+
+class AlreadyExistsException;
+
+class InvalidPartitionException;
+
+class UnknownPartitionException;
+
+class InvalidObjectException;
+
+class NoSuchObjectException;
+
+class InvalidOperationException;
+
+class ConfigValSecurityException;
+
+class InvalidInputException;
+
+class NoSuchTxnException;
+
+class TxnAbortedException;
+
+class TxnOpenException;
+
+class NoSuchLockException;
+
+typedef struct _Version__isset {
+  _Version__isset() : version(false), comments(false) {}
+  bool version :1;
+  bool comments :1;
+} _Version__isset;
+
+class Version {
+ public:
+
+  Version(const Version&);
+  Version& operator=(const Version&);
+  Version() : version(), comments() {
+  }
+
+  virtual ~Version() throw();
+  std::string version;
+  std::string comments;
+
+  _Version__isset __isset;
+
+  void __set_version(const std::string& val);
+
+  void __set_comments(const std::string& val);
+
+  bool operator == (const Version & rhs) const
+  {
+    if (!(version == rhs.version))
+      return false;
+    if (!(comments == rhs.comments))
+      return false;
+    return true;
+  }
+  bool operator != (const Version &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const Version & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(Version &a, Version &b);
+
+inline std::ostream& operator<<(std::ostream& out, const Version& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _FieldSchema__isset {
+  _FieldSchema__isset() : name(false), type(false), comment(false) {}
+  bool name :1;
+  bool type :1;
+  bool comment :1;
+} _FieldSchema__isset;
+
+class FieldSchema {
+ public:
+
+  FieldSchema(const FieldSchema&);
+  FieldSchema& operator=(const FieldSchema&);
+  FieldSchema() : name(), type(), comment() {
+  }
+
+  virtual ~FieldSchema() throw();
+  std::string name;
+  std::string type;
+  std::string comment;
+
+  _FieldSchema__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  void __set_type(const std::string& val);
+
+  void __set_comment(const std::string& val);
+
+  bool operator == (const FieldSchema & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    if (!(type == rhs.type))
+      return false;
+    if (!(comment == rhs.comment))
+      return false;
+    return true;
+  }
+  bool operator != (const FieldSchema &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const FieldSchema & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(FieldSchema &a, FieldSchema &b);
+
+inline std::ostream& operator<<(std::ostream& out, const FieldSchema& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SQLPrimaryKey__isset {
+  _SQLPrimaryKey__isset() : table_db(false), table_name(false), column_name(false), key_seq(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false), catName(false) {}
+  bool table_db :1;
+  bool table_name :1;
+  bool column_name :1;
+  bool key_seq :1;
+  bool pk_name :1;
+  bool enable_cstr :1;
+  bool validate_cstr :1;
+  bool rely_cstr :1;
+  bool catName :1;
+} _SQLPrimaryKey__isset;
+
+class SQLPrimaryKey {
+ public:
+
+  SQLPrimaryKey(const SQLPrimaryKey&);
+  SQLPrimaryKey& operator=(const SQLPrimaryKey&);
+  SQLPrimaryKey() : table_db(), table_name(), column_name(), key_seq(0), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0), catName() {
+  }
+
+  virtual ~SQLPrimaryKey() throw();
+  std::string table_db;
+  std::string table_name;
+  std::string column_name;
+  int32_t key_seq;
+  std::string pk_name;
+  bool enable_cstr;
+  bool validate_cstr;
+  bool rely_cstr;
+  std::string catName;
+
+  _SQLPrimaryKey__isset __isset;
+
+  void __set_table_db(const std::string& val);
+
+  void __set_table_name(const std::string& val);
+
+  void __set_column_name(const std::string& val);
+
+  void __set_key_seq(const int32_t val);
+
+  void __set_pk_name(const std::string& val);
+
+  void __set_enable_cstr(const bool val);
+
+  void __set_validate_cstr(const bool val);
+
+  void __set_rely_cstr(const bool val);
+
+  void __set_catName(const std::string& val);
+
+  bool operator == (const SQLPrimaryKey & rhs) const
+  {
+    if (!(table_db == rhs.table_db))
+      return false;
+    if (!(table_name == rhs.table_name))
+      return false;
+    if (!(column_name == rhs.column_name))
+      return false;
+    if (!(key_seq == rhs.key_seq))
+      return false;
+    if (!(pk_name == rhs.pk_name))
+      return false;
+    if (!(enable_cstr == rhs.enable_cstr))
+      return false;
+    if (!(validate_cstr == rhs.validate_cstr))
+      return false;
+    if (!(rely_cstr == rhs.rely_cstr))
+      return false;
+    if (__isset.catName != rhs.__isset.catName)
+      return false;
+    else if (__isset.catName && !(catName == rhs.catName))
+      return false;
+    return true;
+  }
+  bool operator != (const SQLPrimaryKey &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SQLPrimaryKey & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SQLPrimaryKey &a, SQLPrimaryKey &b);
+
+inline std::ostream& operator<<(std::ostream& out, const SQLPrimaryKey& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SQLForeignKey__isset {
+  _SQLForeignKey__isset() : pktable_db(false), pktable_name(false), pkcolumn_name(false), fktable_db(false), fktable_name(false), fkcolumn_name(false), key_seq(false), update_rule(false), delete_rule(false), fk_name(false), pk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false), catName(false) {}
+  bool pktable_db :1;
+  bool pktable_name :1;
+  bool pkcolumn_name :1;
+  bool fktable_db :1;
+  bool fktable_name :1;
+  bool fkcolumn_name :1;
+  bool key_seq :1;
+  bool update_rule :1;
+  bool delete_rule :1;
+  bool fk_name :1;
+  bool pk_name :1;
+  bool enable_cstr :1;
+  bool validate_cstr :1;
+  bool rely_cstr :1;
+  bool catName :1;
+} _SQLForeignKey__isset;
+
+class SQLForeignKey {
+ public:
+
+  SQLForeignKey(const SQLForeignKey&);
+  SQLForeignKey& operator=(const SQLForeignKey&);
+  SQLForeignKey() : pktable_db(), pktable_name(), pkcolumn_name(), fktable_db(), fktable_name(), fkcolumn_name(), key_seq(0), update_rule(0), delete_rule(0), fk_name(), pk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0), catName() {
+  }
+
+  virtual ~SQLForeignKey() throw();
+  std::string pktable_db;
+  std::string pktable_name;
+  std::string pkcolumn_name;
+  std::string fktable_db;
+  std::string fktable_name;
+  std::string fkcolumn_name;
+  int32_t key_seq;
+  int32_t update_rule;
+  int32_t delete_rule;
+  std::string fk_name;
+  std::string pk_name;
+  bool enable_cstr;
+  bool validate_cstr;
+  bool rely_cstr;
+  std::string catName;
+
+  _SQLForeignKey__isset __isset;
+
+  void __set_pktable_db(const std::string& val);
+
+  void __set_pktable_name(const std::string& val);
+
+  void __set_pkcolumn_name(const std::string& val);
+
+  void __set_fktable_db(const std::string& val);
+
+  void __set_fktable_name(const std::string& val);
+
+  void __set_fkcolumn_name(const std::string& val);
+
+  void __set_key_seq(const int32_t val);
+
+  void __set_update_rule(const int32_t val);
+
+  void __set_delete_rule(const int32_t val);
+
+  void __set_fk_name(const std::string& val);
+
+  void __set_pk_name(const std::string& val);
+
+  void __set_enable_cstr(const bool val);
+
+  void __set_validate_cstr(const bool val);
+
+  void __set_rely_cstr(const bool val);
+
+  void __set_catName(const std::string& val);
+
+  bool operator == (const SQLForeignKey & rhs) const
+  {
+    if (!(pktable_db == rhs.pktable_db))
+      return false;
+    if (!(pktable_name == rhs.pktable_name))
+      return false;
+    if (!(pkcolumn_name == rhs.pkcolumn_name))
+      return false;
+    if (!(fktable_db == rhs.fktable_db))
+      return false;
+    if (!(fktable_name == rhs.fktable_name))
+      return false;
+    if (!(fkcolumn_name == rhs.fkcolumn_name))
+      return false;
+    if (!(key_seq == rhs.key_seq))
+      return false;
+    if (!(update_rule == rhs.update_rule))
+      return false;
+    if (!(delete_rule == rhs.delete_rule))
+      return false;
+    if (!(fk_name == rhs.fk_name))
+      return false;
+    if (!(pk_name == rhs.pk_name))
+      return false;
+    if (!(enable_cstr == rhs.enable_cstr))
+      return false;
+    if (!(validate_cstr == rhs.validate_cstr))
+      return false;
+    if (!(rely_cstr == rhs.rely_cstr))
+      return false;
+    if (__isset.catName != rhs.__isset.catName)
+      return false;
+    else if (__isset.catName && !(catName == rhs.catName))
+      return false;
+    return true;
+  }
+  bool operator != (const SQLForeignKey &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SQLForeignKey & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SQLForeignKey &a, SQLForeignKey &b);
+
+inline std::ostream& operator<<(std::ostream& out, const SQLForeignKey& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SQLUniqueConstraint__isset {
+  _SQLUniqueConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), key_seq(false), uk_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {}
+  bool catName :1;
+  bool table_db :1;
+  bool table_name :1;
+  bool column_name :1;
+  bool key_seq :1;
+  bool uk_name :1;
+  bool enable_cstr :1;
+  bool validate_cstr :1;
+  bool rely_cstr :1;
+} _SQLUniqueConstraint__isset;
+
+class SQLUniqueConstraint {
+ public:
+
+  SQLUniqueConstraint(const SQLUniqueConstraint&);
+  SQLUniqueConstraint& operator=(const SQLUniqueConstraint&);
+  SQLUniqueConstraint() : catName(), table_db(), table_name(), column_name(), key_seq(0), uk_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) {
+  }
+
+  virtual ~SQLUniqueConstraint() throw();
+  std::string catName;
+  std::string table_db;
+  std::string table_name;
+  std::string column_name;
+  int32_t key_seq;
+  std::string uk_name;
+  bool enable_cstr;
+  bool validate_cstr;
+  bool rely_cstr;
+
+  _SQLUniqueConstraint__isset __isset;
+
+  void __set_catName(const std::string& val);
+
+  void __set_table_db(const std::string& val);
+
+  void __set_table_name(const std::string& val);
+
+  void __set_column_name(const std::string& val);
+
+  void __set_key_seq(const int32_t val);
+
+  void __set_uk_name(const std::string& val);
+
+  void __set_enable_cstr(const bool val);
+
+  void __set_validate_cstr(const bool val);
+
+  void __set_rely_cstr(const bool val);
+
+  bool operator == (const SQLUniqueConstraint & rhs) const
+  {
+    if (!(catName == rhs.catName))
+      return false;
+    if (!(table_db == rhs.table_db))
+      return false;
+    if (!(table_name == rhs.table_name))
+      return false;
+    if (!(column_name == rhs.column_name))
+      return false;
+    if (!(key_seq == rhs.key_seq))
+      return false;
+    if (!(uk_name == rhs.uk_name))
+      return false;
+    if (!(enable_cstr == rhs.enable_cstr))
+      return false;
+    if (!(validate_cstr == rhs.validate_cstr))
+      return false;
+    if (!(rely_cstr == rhs.rely_cstr))
+      return false;
+    return true;
+  }
+  bool operator != (const SQLUniqueConstraint &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SQLUniqueConstraint & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SQLUniqueConstraint &a, SQLUniqueConstraint &b);
+
+inline std::ostream& operator<<(std::ostream& out, const SQLUniqueConstraint& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SQLNotNullConstraint__isset {
+  _SQLNotNullConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), nn_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {}
+  bool catName :1;
+  bool table_db :1;
+  bool table_name :1;
+  bool column_name :1;
+  bool nn_name :1;
+  bool enable_cstr :1;
+  bool validate_cstr :1;
+  bool rely_cstr :1;
+} _SQLNotNullConstraint__isset;
+
+class SQLNotNullConstraint {
+ public:
+
+  SQLNotNullConstraint(const SQLNotNullConstraint&);
+  SQLNotNullConstraint& operator=(const SQLNotNullConstraint&);
+  SQLNotNullConstraint() : catName(), table_db(), table_name(), column_name(), nn_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) {
+  }
+
+  virtual ~SQLNotNullConstraint() throw();
+  std::string catName;
+  std::string table_db;
+  std::string table_name;
+  std::string column_name;
+  std::string nn_name;
+  bool enable_cstr;
+  bool validate_cstr;
+  bool rely_cstr;
+
+  _SQLNotNullConstraint__isset __isset;
+
+  void __set_catName(const std::string& val);
+
+  void __set_table_db(const std::string& val);
+
+  void __set_table_name(const std::string& val);
+
+  void __set_column_name(const std::string& val);
+
+  void __set_nn_name(const std::string& val);
+
+  void __set_enable_cstr(const bool val);
+
+  void __set_validate_cstr(const bool val);
+
+  void __set_rely_cstr(const bool val);
+
+  bool operator == (const SQLNotNullConstraint & rhs) const
+  {
+    if (!(catName == rhs.catName))
+      return false;
+    if (!(table_db == rhs.table_db))
+      return false;
+    if (!(table_name == rhs.table_name))
+      return false;
+    if (!(column_name == rhs.column_name))
+      return false;
+    if (!(nn_name == rhs.nn_name))
+      return false;
+    if (!(enable_cstr == rhs.enable_cstr))
+      return false;
+    if (!(validate_cstr == rhs.validate_cstr))
+      return false;
+    if (!(rely_cstr == rhs.rely_cstr))
+      return false;
+    return true;
+  }
+  bool operator != (const SQLNotNullConstraint &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SQLNotNullConstraint & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SQLNotNullConstraint &a, SQLNotNullConstraint &b);
+
+inline std::ostream& operator<<(std::ostream& out, const SQLNotNullConstraint& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SQLDefaultConstraint__isset {
+  _SQLDefaultConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), default_value(false), dc_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {}
+  bool catName :1;
+  bool table_db :1;
+  bool table_name :1;
+  bool column_name :1;
+  bool default_value :1;
+  bool dc_name :1;
+  bool enable_cstr :1;
+  bool validate_cstr :1;
+  bool rely_cstr :1;
+} _SQLDefaultConstraint__isset;
+
+class SQLDefaultConstraint {
+ public:
+
+  SQLDefaultConstraint(const SQLDefaultConstraint&);
+  SQLDefaultConstraint& operator=(const SQLDefaultConstraint&);
+  SQLDefaultConstraint() : catName(), table_db(), table_name(), column_name(), default_value(), dc_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) {
+  }
+
+  virtual ~SQLDefaultConstraint() throw();
+  std::string catName;
+  std::string table_db;
+  std::string table_name;
+  std::string column_name;
+  std::string default_value;
+  std::string dc_name;
+  bool enable_cstr;
+  bool validate_cstr;
+  bool rely_cstr;
+
+  _SQLDefaultConstraint__isset __isset;
+
+  void __set_catName(const std::string& val);
+
+  void __set_table_db(const std::string& val);
+
+  void __set_table_name(const std::string& val);
+
+  void __set_column_name(const std::string& val);
+
+  void __set_default_value(const std::string& val);
+
+  void __set_dc_name(const std::string& val);
+
+  void __set_enable_cstr(const bool val);
+
+  void __set_validate_cstr(const bool val);
+
+  void __set_rely_cstr(const bool val);
+
+  bool operator == (const SQLDefaultConstraint & rhs) const
+  {
+    if (!(catName == rhs.catName))
+      return false;
+    if (!(table_db == rhs.table_db))
+      return false;
+    if (!(table_name == rhs.table_name))
+      return false;
+    if (!(column_name == rhs.column_name))
+      return false;
+    if (!(default_value == rhs.default_value))
+      return false;
+    if (!(dc_name == rhs.dc_name))
+      return false;
+    if (!(enable_cstr == rhs.enable_cstr))
+      return false;
+    if (!(validate_cstr == rhs.validate_cstr))
+      return false;
+    if (!(rely_cstr == rhs.rely_cstr))
+      return false;
+    return true;
+  }
+  bool operator != (const SQLDefaultConstraint &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SQLDefaultConstraint & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SQLDefaultConstraint &a, SQLDefaultConstraint &b);
+
+inline std::ostream& operator<<(std::ostream& out, const SQLDefaultConstraint& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SQLCheckConstraint__isset {
+  _SQLCheckConstraint__isset() : catName(false), table_db(false), table_name(false), column_name(false), check_expression(false), dc_name(false), enable_cstr(false), validate_cstr(false), rely_cstr(false) {}
+  bool catName :1;
+  bool table_db :1;
+  bool table_name :1;
+  bool column_name :1;
+  bool check_expression :1;
+  bool dc_name :1;
+  bool enable_cstr :1;
+  bool validate_cstr :1;
+  bool rely_cstr :1;
+} _SQLCheckConstraint__isset;
+
+class SQLCheckConstraint {
+ public:
+
+  SQLCheckConstraint(const SQLCheckConstraint&);
+  SQLCheckConstraint& operator=(const SQLCheckConstraint&);
+  SQLCheckConstraint() : catName(), table_db(), table_name(), column_name(), check_expression(), dc_name(), enable_cstr(0), validate_cstr(0), rely_cstr(0) {
+  }
+
+  virtual ~SQLCheckConstraint() throw();
+  std::string catName;
+  std::string table_db;
+  std::string table_name;
+  std::string column_name;
+  std::string check_expression;
+  std::string dc_name;
+  bool enable_cstr;
+  bool validate_cstr;
+  bool rely_cstr;
+
+  _SQLCheckConstraint__isset __isset;
+
+  void __set_catName(const std::string& val);
+
+  void __set_table_db(const std::string& val);
+
+  void __set_table_name(const std::string& val);
+
+  void __set_column_name(const std::string& val);
+
+  void __set_check_expression(const std::string& val);
+
+  void __set_dc_name(const std::string& val);
+
+  void __set_enable_cstr(const bool val);
+
+  void __set_validate_cstr(const bool val);
+
+  void __set_rely_cstr(const bool val);
+
+  bool operator == (const SQLCheckConstraint & rhs) const
+  {
+    if (!(catName == rhs.catName))
+      return false;
+    if (!(table_db == rhs.table_db))
+      return false;
+    if (!(table_name == rhs.table_name))
+      return false;
+    if (!(column_name == rhs.column_name))
+      return false;
+    if (!(check_expression == rhs.check_expression))
+      return false;
+    if (!(dc_name == rhs.dc_name))
+      return false;
+    if (!(enable_cstr == rhs.enable_cstr))
+      return false;
+    if (!(validate_cstr == rhs.validate_cstr))
+      return false;
+    if (!(rely_cstr == rhs.rely_cstr))
+      return false;
+    return true;
+  }
+  bool operator != (const SQLCheckConstraint &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SQLCheckConstraint & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SQLCheckConstraint &a, SQLCheckConstraint &b);
+
+inline std::ostream& operator<<(std::ostream& out, const SQLCheckConstraint& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _Type__isset {
+  _Type__isset() : name(false), type1(false), type2(false), fields(false) {}
+  bool name :1;
+  bool type1 :1;
+  bool type2 :1;
+  bool fields :1;
+} _Type__isset;
+
+class Type {
+ public:
+
+  Type(const Type&);
+  Type& operator=(const Type&);
+  Type() : name(), type1(), type2() {
+  }
+
+  virtual ~Type() throw();
+  std::string name;
+  std::string type1;
+  std::string type2;
+  std::vector<FieldSchema>  fields;
+
+  _Type__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  void __set_type1(const std::string& val);
+
+  void __set_type2(const std::string& val);
+
+  void __set_fields(const std::vector<FieldSchema> & val);
+
+  bool operator == (const Type & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    if (__isset.type1 != rhs.__isset.type1)
+      return false;
+    else if (__isset.type1 && !(type1 == rhs.type1))
+      return false;
+    if (__isset.type2 != rhs.__isset.type2)
+      return false;
+    else if (__isset.type2 && !(type2 == rhs.type2))
+      return false;
+    if (__isset.fields != rhs.__isset.fields)
+      return false;
+    else if (__isset.fields && !(fields == rhs.fields))
+      return false;
+    return true;
+  }
+  bool operator != (const Type &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const Type & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(Type &a, Type &b);
+
+inline std::ostream& operator<<(std::ostream& out, const Type& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _HiveObjectRef__isset {
+  _HiveObjectRef__isset() : objectType(false), dbName(false), objectName(false), partValues(false), columnName(false), catName(false) {}
+  bool objectType :1;
+  bool dbName :1;
+  bool objectName :1;
+  bool partValues :1;
+  bool columnName :1;
+  bool catName :1;
+} _HiveObjectRef__isset;
+
+class HiveObjectRef {
+ public:
+
+  HiveObjectRef(const HiveObjectRef&);
+  HiveObjectRef& operator=(const HiveObjectRef&);
+  HiveObjectRef() : objectType((HiveObjectType::type)0), dbName(), objectName(), columnName(), catName() {
+  }
+
+  virtual ~HiveObjectRef() throw();
+  HiveObjectType::type objectType;
+  std::string dbName;
+  std::string objectName;
+  std::vector<std::string>  partValues;
+  std::string columnName;
+  std::string catName;
+
+  _HiveObjectRef__isset __isset;
+
+  void __set_objectType(const HiveObjectType::type val);
+
+  void __set_dbName(const std::string& val);
+
+  void __set_objectName(const std::string& val);
+
+  void __set_partValues(const std::vector<std::string> & val);
+
+  void __set_columnName(const std::string& val);
+
+  void __set_catName(const std::string& val);
+
+  bool operator == (const HiveObjectRef & rhs) const
+  {
+    if (!(objectType == rhs.objectType))
+      return false;
+    if (!(dbName == rhs.dbName))
+      return false;
+    if (!(objectName == rhs.objectName))
+      return false;
+    if (!(partValues == rhs.partValues))
+      return false;
+    if (!(columnName == rhs.columnName))
+      return false;
+    if (__isset.catName != rhs.__isset.catName)
+      return false;
+    else if (__isset.catName && !(catName == rhs.catName))
+      return false;
+    return true;
+  }
+  bool operator != (const HiveObjectRef &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const HiveObjectRef & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(HiveObjectRef &a, HiveObjectRef &b);
+
+inline std::ostream& operator<<(std::ostream& out, const HiveObjectRef& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _PrivilegeGrantInfo__isset {
+  _PrivilegeGrantInfo__isset() : privilege(false), createTime(false), grantor(false), grantorType(false), grantOption(false) {}
+  bool privilege :1;
+  bool createTime :1;
+  bool grantor :1;
+  bool grantorType :1;
+  bool grantOption :1;
+} _PrivilegeGrantInfo__isset;
+
+class PrivilegeGrantInfo {
+ public:
+
+  PrivilegeGrantInfo(const PrivilegeGrantInfo&);
+  PrivilegeGrantInfo& operator=(const PrivilegeGrantInfo&);
+  PrivilegeGrantInfo() : privilege(), createTime(0), grantor(), grantorType((PrincipalType::type)0), grantOption(0) {
+  }
+
+  virtual ~PrivilegeGrantInfo() throw();
+  std::string privilege;
+  int32_t createTime;
+  std::string grantor;
+  PrincipalType::type grantorType;
+  bool grantOption;
+
+  _PrivilegeGrantInfo__isset __isset;
+
+  void __set_privilege(const std::string& val);
+
+  void __set_createTime(const int32_t val);
+
+  void __set_grantor(const std::string& val);
+
+  void __set_grantorType(const PrincipalType::type val);
+
+  void __set_grantOption(const bool val);
+
+  bool operator == (const PrivilegeGrantInfo & rhs) const
+  {
+    if (!(privilege == rhs.privilege))
+      return false;
+    if (!(createTime == rhs.createTime))
+      return false;
+    if (!(grantor == rhs.grantor))
+      return false;
+    if (!(grantorType == rhs.grantorType))
+      return false;
+    if (!(grantOption == rhs.grantOption))
+      return false;
+    return true;
+  }
+  bool operator != (const PrivilegeGrantInfo &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const PrivilegeGrantInfo & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(PrivilegeGrantInfo &a, PrivilegeGrantInfo &b);
+
+inline std::ostream& operator<<(std::ostream& out, const PrivilegeGrantInfo& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _HiveObjectPrivilege__isset {
+  _HiveObjectPrivilege__isset() : hiveObject(false), principalName(false), principalType(false), grantInfo(false), authorizer(false) {}
+  bool hiveObject :1;
+  bool principalName :1;
+  bool principalType :1;
+  bool grantInfo :1;
+  bool authorizer :1;
+} _HiveObjectPrivilege__isset;
+
+class HiveObjectPrivilege {
+ public:
+
+  HiveObjectPrivilege(const HiveObjectPrivilege&);
+  HiveObjectPrivilege& operator=(const HiveObjectPrivilege&);
+  HiveObjectPrivilege() : principalName(), principalType((PrincipalType::type)0), authorizer() {
+  }
+
+  virtual ~HiveObjectPrivilege() throw();
+  HiveObjectRef hiveObject;
+  std::string principalName;
+  PrincipalType::type principalType;
+  PrivilegeGrantInfo grantInfo;
+  std::string authorizer;
+
+  _HiveObjectPrivilege__isset __isset;
+
+  void __set_hiveObject(const HiveObjectRef& val);
+
+  void __set_principalName(const std::string& val);
+
+  void __set_principalType(const PrincipalType::type val);
+
+  void __set_grantInfo(const PrivilegeGrantInfo& val);
+
+  void __set_authorizer(const std::string& val);
+
+  bool operator == (const HiveObjectPrivilege & rhs) const
+  {
+    if (!(hiveObject == rhs.hiveObject))
+      return false;
+    if (!(principalName == rhs.principalName))
+      return false;
+    if (!(principalType == rhs.principalType))
+      return false;
+    if (!(grantInfo == rhs.grantInfo))
+      return false;
+    if (!(authorizer == rhs.authorizer))
+      return false;
+    return true;
+  }
+  bool operator != (const HiveObjectPrivilege &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const HiveObjectPrivilege & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(HiveObjectPrivilege &a, HiveObjectPrivilege &b);
+
+inline std::ostream& operator<<(std::ostream& out, const HiveObjectPrivilege& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _PrivilegeBag__isset {
+  _PrivilegeBag__isset() : privileges(false) {}
+  bool privileges :1;
+} _PrivilegeBag__isset;
+
+class PrivilegeBag {
+ public:
+
+  PrivilegeBag(const PrivilegeBag&);
+  PrivilegeBag& operator=(const PrivilegeBag&);
+  PrivilegeBag() {
+  }
+
+  virtual ~PrivilegeBag() throw();
+  std::vector<HiveObjectPrivilege>  privileges;
+
+  _PrivilegeBag__isset __isset;
+
+  void __set_privileges(const std::vector<HiveObjectPrivilege> & val);
+
+  bool operator == (const PrivilegeBag & rhs) const
+  {
+    if (!(privileges == rhs.privileges))
+      return false;
+    return true;
+  }
+  bool operator != (const PrivilegeBag &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const PrivilegeBag & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(PrivilegeBag &a, PrivilegeBag &b);
+
+inline std::ostream& operator<<(std::ostream& out, const PrivilegeBag& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _PrincipalPrivilegeSet__isset {
+  _PrincipalPrivilegeSet__isset() : userPrivileges(false), groupPrivileges(false), rolePrivileges(false) {}
+  bool userPrivileges :1;
+  bool groupPrivileges :1;
+  bool rolePrivileges :1;
+} _PrincipalPrivilegeSet__isset;
+
+class PrincipalPrivilegeSet {
+ public:
+
+  PrincipalPrivilegeSet(const PrincipalPrivilegeSet&);
+  PrincipalPrivilegeSet& operator=(const PrincipalPrivilegeSet&);
+  PrincipalPrivilegeSet() {
+  }
+
+  virtual ~PrincipalPrivilegeSet() throw();
+  std::map<std::string, std::vector<PrivilegeGrantInfo> >  userPrivileges;
+  std::map<std::string, std::vector<PrivilegeGrantInfo> >  groupPrivileges;
+  std::map<std::string, std::vector<PrivilegeGrantInfo> >  rolePrivileges;
+
+  _PrincipalPrivilegeSet__isset __isset;
+
+  void __set_userPrivileges(const std::map<std::string, std::vector<PrivilegeGrantInfo> > & val);
+
+  void __set_groupPrivileges(const std::map<std::string, std::vector<PrivilegeGrantInfo> > & val);
+
+  void __set_rolePrivileges(const std::map<std::string, std::vector<PrivilegeGrantInfo> > & val);
+
+  bool operator == (const PrincipalPrivilegeSet & rhs) const
+  {
+    if (!(userPrivileges == rhs.userPrivileges))
+      return false;
+    if (!(groupPrivileges == rhs.groupPrivileges))
+      return false;
+    if (!(rolePrivileges == rhs.rolePrivileges))
+      return false;
+    return true;
+  }
+  bool operator != (const PrincipalPrivilegeSet &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const PrincipalPrivilegeSet & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(PrincipalPrivilegeSet &a, PrincipalPrivilegeSet &b);
+
+inline std::ostream& operator<<(std::ostream& out, const PrincipalPrivilegeSet& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _GrantRevokePrivilegeRequest__isset {
+  _GrantRevokePrivilegeRequest__isset() : requestType(false), privileges(false), revokeGrantOption(false) {}
+  bool requestType :1;
+  bool privileges :1;
+  bool revokeGrantOption :1;
+} _GrantRevokePrivilegeRequest__isset;
+
+class GrantRevokePrivilegeRequest {
+ public:
+
+  GrantRevokePrivilegeRequest(const GrantRevokePrivilegeRequest&);
+  GrantRevokePrivilegeRequest& operator=(const GrantRevokePrivilegeRequest&);
+  GrantRevokePrivilegeRequest() : requestType((GrantRevokeType::type)0), revokeGrantOption(0) {
+  }
+
+  virtual ~GrantRevokePrivilegeRequest() throw();
+  GrantRevokeType::type requestType;
+  PrivilegeBag privileges;
+  bool revokeGrantOption;
+
+  _GrantRevokePrivilegeRequest__isset __isset;
+
+  void __set_requestType(const GrantRevokeType::type val);
+
+  void __set_privileges(const PrivilegeBag& val);
+
+  void __set_revokeGrantOption(const bool val);
+
+  bool operator == (const GrantRevokePrivilegeRequest & rhs) const
+  {
+    if (!(requestType == rhs.requestType))
+      return false;
+    if (!(privileges == rhs.privileges))
+      return false;
+    if (__isset.revokeGrantOption != rhs.__isset.revokeGrantOption)
+      return false;
+    else if (__isset.revokeGrantOption && !(revokeGrantOption == rhs.revokeGrantOption))
+      return false;
+    return true;
+  }
+  bool operator != (const GrantRevokePrivilegeRequest &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GrantRevokePrivilegeRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GrantRevokePrivilegeRequest &a, GrantRevokePrivilegeRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GrantRevokePrivilegeRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _GrantRevokePrivilegeResponse__isset {
+  _GrantRevokePrivilegeResponse__isset() : success(false) {}
+  bool success :1;
+} _GrantRevokePrivilegeResponse__isset;
+
+class GrantRevokePrivilegeResponse {
+ public:
+
+  GrantRevokePrivilegeResponse(const GrantRevokePrivilegeResponse&);
+  GrantRevokePrivilegeResponse& operator=(const GrantRevokePrivilegeResponse&);
+  GrantRevokePrivilegeResponse() : success(0) {
+  }
+
+  virtual ~GrantRevokePrivilegeResponse() throw();
+  bool success;
+
+  _GrantRevokePrivilegeResponse__isset __isset;
+
+  void __set_success(const bool val);
+
+  bool operator == (const GrantRevokePrivilegeResponse & rhs) const
+  {
+    if (__isset.success != rhs.__isset.success)
+      return false;
+    else if (__isset.success && !(success == rhs.success))
+      return false;
+    return true;
+  }
+  bool operator != (const GrantRevokePrivilegeResponse &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GrantRevokePrivilegeResponse & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GrantRevokePrivilegeResponse &a, GrantRevokePrivilegeResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GrantRevokePrivilegeResponse& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _Role__isset {
+  _Role__isset() : roleName(false), createTime(false), ownerName(false) {}
+  bool roleName :1;
+  bool createTime :1;
+  bool ownerName :1;
+} _Role__isset;
+
+class Role {
+ public:
+
+  Role(const Role&);
+  Role& operator=(const Role&);
+  Role() : roleName(), createTime(0), ownerName() {
+  }
+
+  virtual ~Role() throw();
+  std::string roleName;
+  int32_t createTime;
+  std::string ownerName;
+
+  _Role__isset __isset;
+
+  void __set_roleName(const std::string& val);
+
+  void __set_createTime(const int32_t val);
+
+  void __set_ownerName(const std::string& val);
+
+  bool operator == (const Role & rhs) const
+  {
+    if (!(roleName == rhs.roleName))
+      return false;
+    if (!(createTime == rhs.createTime))
+      return false;
+    if (!(ownerName == rhs.ownerName))
+      return false;
+    return true;
+  }
+  bool operator != (const Role &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const Role & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(Role &a, Role &b);
+
+inline std::ostream& operator<<(std::ostream& out, const Role& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _RolePrincipalGrant__isset {
+  _RolePrincipalGrant__isset() : roleName(false), principalName(false), principalType(false), grantOption(false), grantTime(false), grantorName(false), grantorPrincipalType(false) {}
+  bool roleName :1;
+  bool principalName :1;
+  bool principalType :1;
+  bool grantOption :1;
+  bool grantTime :1;
+  bool grantorName :1;
+  bool grantorPrincipalType :1;
+} _RolePrincipalGrant__isset;
+
+class RolePrincipalGrant {
+ public:
+
+  RolePrincipalGrant(const RolePrincipalGrant&);
+  RolePrincipalGrant& operator=(const RolePrincipalGrant&);
+  RolePrincipalGrant() : roleName(), principalName(), principalType((PrincipalType::type)0), grantOption(0), grantTime(0), grantorName(), grantorPrincipalType((PrincipalType::type)0) {
+  }
+
+  virtual ~RolePrincipalGrant() throw();
+  std::string roleName;
+  std::string principalName;
+  PrincipalType::type principalType;
+  bool grantOption;
+  int32_t grantTime;
+  std::string grantorName;
+  PrincipalType::type grantorPrincipalType;
+
+  _RolePrincipalGrant__isset __isset;
+
+  void __set_roleName(const std::string& val);
+
+  void __set_principalName(const std::string& val);
+
+  void __set_principalType(const PrincipalType::type val);
+
+  void __set_grantOption(const bool val);
+
+  void __set_grantTime(const int32_t val);
+
+  void __set_grantorName(const std::string& val);
+
+  void __set_grantorPrincipalType(const PrincipalType::type val);
+
+  bool operator == (const RolePrincipalGrant & rhs) const
+  {
+    if (!(roleName == rhs.roleName))
+      return false;
+    if (!(principalName == rhs.principalName))
+      return false;
+    if (!(principalType == rhs.principalType))
+      return false;
+    if (!(grantOption == rhs.grantOption))
+      return false;
+    if (!(grantTime == rhs.grantTime))
+      return false;
+    if (!(grantorName == rhs.grantorName))
+      return false;
+    if (!(grantorPrincipalType == rhs.grantorPrincipalType))
+      return false;
+    return true;
+  }
+  bool operator != (const RolePrincipalGrant &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const RolePrincipalGrant & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(RolePrincipalGrant &a, RolePrincipalGrant &b);
+
+inline std::ostream& operator<<(std::ostream& out, const RolePrincipalGrant& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+class GetRoleGrantsForPrincipalRequest {
+ public:
+
+  GetRoleGrantsForPrincipalRequest(const GetRoleGrantsForPrincipalRequest&);
+  GetRoleGrantsForPrincipalRequest& operator=(const GetRoleGrantsForPrincipalRequest&);
+  GetRoleGrantsForPrincipalRequest() : principal_name(), principal_type((PrincipalType::type)0) {
+  }
+
+  virtual ~GetRoleGrantsForPrincipalRequest() throw();
+  std::string principal_name;
+  PrincipalType::type principal_type;
+
+  void __set_principal_name(const std::string& val);
+
+  void __set_principal_type(const PrincipalType::type val);
+
+  bool operator == (const GetRoleGrantsForPrincipalRequest & rhs) const
+  {
+    if (!(principal_name == rhs.principal_name))
+      return false;
+    if (!(principal_type == rhs.principal_type))
+      return false;
+    return true;
+  }
+  bool operator != (const GetRoleGrantsForPrincipalRequest &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GetRoleGrantsForPrincipalRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GetRoleGrantsForPrincipalRequest &a, GetRoleGrantsForPrincipalRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GetRoleGrantsForPrincipalRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+class GetRoleGrantsForPrincipalResponse {
+ public:
+
+  GetRoleGrantsForPrincipalResponse(const GetRoleGrantsForPrincipalResponse&);
+  GetRoleGrantsForPrincipalResponse& operator=(const GetRoleGrantsForPrincipalResponse&);
+  GetRoleGrantsForPrincipalResponse() {
+  }
+
+  virtual ~GetRoleGrantsForPrincipalResponse() throw();
+  std::vector<RolePrincipalGrant>  principalGrants;
+
+  void __set_principalGrants(const std::vector<RolePrincipalGrant> & val);
+
+  bool operator == (const GetRoleGrantsForPrincipalResponse & rhs) const
+  {
+    if (!(principalGrants == rhs.principalGrants))
+      return false;
+    return true;
+  }
+  bool operator != (const GetRoleGrantsForPrincipalResponse &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GetRoleGrantsForPrincipalResponse & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GetRoleGrantsForPrincipalResponse &a, GetRoleGrantsForPrincipalResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GetRoleGrantsForPrincipalResponse& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+class GetPrincipalsInRoleRequest {
+ public:
+
+  GetPrincipalsInRoleRequest(const GetPrincipalsInRoleRequest&);
+  GetPrincipalsInRoleRequest& operator=(const GetPrincipalsInRoleRequest&);
+  GetPrincipalsInRoleRequest() : roleName() {
+  }
+
+  virtual ~GetPrincipalsInRoleRequest() throw();
+  std::string roleName;
+
+  void __set_roleName(const std::string& val);
+
+  bool operator == (const GetPrincipalsInRoleRequest & rhs) const
+  {
+    if (!(roleName == rhs.roleName))
+      return false;
+    return true;
+  }
+  bool operator != (const GetPrincipalsInRoleRequest &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GetPrincipalsInRoleRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GetPrincipalsInRoleRequest &a, GetPrincipalsInRoleRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GetPrincipalsInRoleRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+
+class GetPrincipalsInRoleResponse {
+ public:
+
+  GetPrincipalsInRoleResponse(const GetPrincipalsInRoleResponse&);
+  GetPrincipalsInRoleResponse& operator=(const GetPrincipalsInRoleResponse&);
+  GetPrincipalsInRoleResponse() {
+  }
+
+  virtual ~GetPrincipalsInRoleResponse() throw();
+  std::vector<RolePrincipalGrant>  principalGrants;
+
+  void __set_principalGrants(const std::vector<RolePrincipalGrant> & val);
+
+  bool operator == (const GetPrincipalsInRoleResponse & rhs) const
+  {
+    if (!(principalGrants == rhs.principalGrants))
+      return false;
+    return true;
+  }
+  bool operator != (const GetPrincipalsInRoleResponse &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GetPrincipalsInRoleResponse & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GetPrincipalsInRoleResponse &a, GetPrincipalsInRoleResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GetPrincipalsInRoleResponse& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _GrantRevokeRoleRequest__isset {
+  _GrantRevokeRoleRequest__isset() : requestType(false), roleName(false), principalName(false), principalType(false), grantor(false), grantorType(false), grantOption(false) {}
+  bool requestType :1;
+  bool roleName :1;
+  bool principalName :1;
+  bool principalType :1;
+  bool grantor :1;
+  bool grantorType :1;
+  bool grantOption :1;
+} _GrantRevokeRoleRequest__isset;
+
+class GrantRevokeRoleRequest {
+ public:
+
+  GrantRevokeRoleRequest(const GrantRevokeRoleRequest&);
+  GrantRevokeRoleRequest& operator=(const GrantRevokeRoleRequest&);
+  GrantRevokeRoleRequest() : requestType((GrantRevokeType::type)0), roleName(), principalName(), principalType((PrincipalType::type)0), grantor(), grantorType((PrincipalType::type)0), grantOption(0) {
+  }
+
+  virtual ~GrantRevokeRoleRequest() throw();
+  GrantRevokeType::type requestType;
+  std::string roleName;
+  std::string principalName;
+  PrincipalType::type principalType;
+  std::string grantor;
+  PrincipalType::type grantorType;
+  bool grantOption;
+
+  _GrantRevokeRoleRequest__isset __isset;
+
+  void __set_requestType(const GrantRevokeType::type val);
+
+  void __set_roleName(const std::string& val);
+
+  void __set_principalName(const std::string& val);
+
+  void __set_principalType(const PrincipalType::type val);
+
+  void __set_grantor(const std::string& val);
+
+  void __set_grantorType(const PrincipalType::type val);
+
+  void __set_grantOption(const bool val);
+
+  bool operator == (const GrantRevokeRoleRequest & rhs) const
+  {
+    if (!(requestType == rhs.requestType))
+      return false;
+    if (!(roleName == rhs.roleName))
+      return false;
+    if (!(principalName == rhs.principalName))
+      return false;
+    if (!(principalType == rhs.principalType))
+      return false;
+    if (__isset.grantor != rhs.__isset.grantor)
+      return false;
+    else if (__isset.grantor && !(grantor == rhs.grantor))
+      return false;
+    if (__isset.grantorType != rhs.__isset.grantorType)
+      return false;
+    else if (__isset.grantorType && !(grantorType == rhs.grantorType))
+      return false;
+    if (__isset.grantOption != rhs.__isset.grantOption)
+      return false;
+    else if (__isset.grantOption && !(grantOption == rhs.grantOption))
+      return false;
+    return true;
+  }
+  bool operator != (const GrantRevokeRoleRequest &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GrantRevokeRoleRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GrantRevokeRoleRequest &a, GrantRevokeRoleRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GrantRevokeRoleRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _GrantRevokeRoleResponse__isset {
+  _GrantRevokeRoleResponse__isset() : success(false) {}
+  bool success :1;
+} _GrantRevokeRoleResponse__isset;
+
+class GrantRevokeRoleResponse {
+ public:
+
+  GrantRevokeRoleResponse(const GrantRevokeRoleResponse&);
+  GrantRevokeRoleResponse& operator=(const GrantRevokeRoleResponse&);
+  GrantRevokeRoleResponse() : success(0) {
+  }
+
+  virtual ~GrantRevokeRoleResponse() throw();
+  bool success;
+
+  _GrantRevokeRoleResponse__isset __isset;
+
+  void __set_success(const bool val);
+
+  bool operator == (const GrantRevokeRoleResponse & rhs) const
+  {
+    if (__isset.success != rhs.__isset.success)
+      return false;
+    else if (__isset.success && !(success == rhs.success))
+      return false;
+    return true;
+  }
+  bool operator != (const GrantRevokeRoleResponse &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GrantRevokeRoleResponse & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GrantRevokeRoleResponse &a, GrantRevokeRoleResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GrantRevokeRoleResponse& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _Catalog__isset {
+  _Catalog__isset() : name(false), description(false), locationUri(false) {}
+  bool name :1;
+  bool description :1;
+  bool locationUri :1;
+} _Catalog__isset;
+
+class Catalog {
+ public:
+
+  Catalog(const Catalog&);
+  Catalog& operator=(const Catalog&);
+  Catalog() : name(), description(), locationUri() {
+  }
+
+  virtual ~Catalog() throw();
+  std::string name;
+  std::string description;
+  std::string locationUri;
+
+  _Catalog__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  void __set_description(const std::string& val);
+
+  void __set_locationUri(const std::string& val);
+
+  bool operator == (const Catalog & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    if (__isset.description != rhs.__isset.description)
+      return false;
+    else if (__isset.description && !(description == rhs.description))
+      return false;
+    if (!(locationUri == rhs.locationUri))
+      return false;
+    return true;
+  }
+  bool operator != (const Catalog &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const Catalog & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(Catalog &a, Catalog &b);
+
+inline std::ostream& operator<<(std::ostream& out, const Catalog& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _CreateCatalogRequest__isset {
+  _CreateCatalogRequest__isset() : catalog(false) {}
+  bool catalog :1;
+} _CreateCatalogRequest__isset;
+
+class CreateCatalogRequest {
+ public:
+
+  CreateCatalogRequest(const CreateCatalogRequest&);
+  CreateCatalogRequest& operator=(const CreateCatalogRequest&);
+  CreateCatalogRequest() {
+  }
+
+  virtual ~CreateCatalogRequest() throw();
+  Catalog catalog;
+
+  _CreateCatalogRequest__isset __isset;
+
+  void __set_catalog(const Catalog& val);
+
+  bool operator == (const CreateCatalogRequest & rhs) const
+  {
+    if (!(catalog == rhs.catalog))
+      return false;
+    return true;
+  }
+  bool operator != (const CreateCatalogRequest &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const CreateCatalogRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(CreateCatalogRequest &a, CreateCatalogRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const CreateCatalogRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _AlterCatalogRequest__isset {
+  _AlterCatalogRequest__isset() : name(false), newCat(false) {}
+  bool name :1;
+  bool newCat :1;
+} _AlterCatalogRequest__isset;
+
+class AlterCatalogRequest {
+ public:
+
+  AlterCatalogRequest(const AlterCatalogRequest&);
+  AlterCatalogRequest& operator=(const AlterCatalogRequest&);
+  AlterCatalogRequest() : name() {
+  }
+
+  virtual ~AlterCatalogRequest() throw();
+  std::string name;
+  Catalog newCat;
+
+  _AlterCatalogRequest__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  void __set_newCat(const Catalog& val);
+
+  bool operator == (const AlterCatalogRequest & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    if (!(newCat == rhs.newCat))
+      return false;
+    return true;
+  }
+  bool operator != (const AlterCatalogRequest &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const AlterCatalogRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(AlterCatalogRequest &a, AlterCatalogRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const AlterCatalogRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _GetCatalogRequest__isset {
+  _GetCatalogRequest__isset() : name(false) {}
+  bool name :1;
+} _GetCatalogRequest__isset;
+
+class GetCatalogRequest {
+ public:
+
+  GetCatalogRequest(const GetCatalogRequest&);
+  GetCatalogRequest& operator=(const GetCatalogRequest&);
+  GetCatalogRequest() : name() {
+  }
+
+  virtual ~GetCatalogRequest() throw();
+  std::string name;
+
+  _GetCatalogRequest__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  bool operator == (const GetCatalogRequest & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    return true;
+  }
+  bool operator != (const GetCatalogRequest &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GetCatalogRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GetCatalogRequest &a, GetCatalogRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GetCatalogRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _GetCatalogResponse__isset {
+  _GetCatalogResponse__isset() : catalog(false) {}
+  bool catalog :1;
+} _GetCatalogResponse__isset;
+
+class GetCatalogResponse {
+ public:
+
+  GetCatalogResponse(const GetCatalogResponse&);
+  GetCatalogResponse& operator=(const GetCatalogResponse&);
+  GetCatalogResponse() {
+  }
+
+  virtual ~GetCatalogResponse() throw();
+  Catalog catalog;
+
+  _GetCatalogResponse__isset __isset;
+
+  void __set_catalog(const Catalog& val);
+
+  bool operator == (const GetCatalogResponse & rhs) const
+  {
+    if (!(catalog == rhs.catalog))
+      return false;
+    return true;
+  }
+  bool operator != (const GetCatalogResponse &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GetCatalogResponse & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GetCatalogResponse &a, GetCatalogResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GetCatalogResponse& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _GetCatalogsResponse__isset {
+  _GetCatalogsResponse__isset() : names(false) {}
+  bool names :1;
+} _GetCatalogsResponse__isset;
+
+class GetCatalogsResponse {
+ public:
+
+  GetCatalogsResponse(const GetCatalogsResponse&);
+  GetCatalogsResponse& operator=(const GetCatalogsResponse&);
+  GetCatalogsResponse() {
+  }
+
+  virtual ~GetCatalogsResponse() throw();
+  std::vector<std::string>  names;
+
+  _GetCatalogsResponse__isset __isset;
+
+  void __set_names(const std::vector<std::string> & val);
+
+  bool operator == (const GetCatalogsResponse & rhs) const
+  {
+    if (!(names == rhs.names))
+      return false;
+    return true;
+  }
+  bool operator != (const GetCatalogsResponse &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const GetCatalogsResponse & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(GetCatalogsResponse &a, GetCatalogsResponse &b);
+
+inline std::ostream& operator<<(std::ostream& out, const GetCatalogsResponse& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _DropCatalogRequest__isset {
+  _DropCatalogRequest__isset() : name(false) {}
+  bool name :1;
+} _DropCatalogRequest__isset;
+
+class DropCatalogRequest {
+ public:
+
+  DropCatalogRequest(const DropCatalogRequest&);
+  DropCatalogRequest& operator=(const DropCatalogRequest&);
+  DropCatalogRequest() : name() {
+  }
+
+  virtual ~DropCatalogRequest() throw();
+  std::string name;
+
+  _DropCatalogRequest__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  bool operator == (const DropCatalogRequest & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    return true;
+  }
+  bool operator != (const DropCatalogRequest &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const DropCatalogRequest & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(DropCatalogRequest &a, DropCatalogRequest &b);
+
+inline std::ostream& operator<<(std::ostream& out, const DropCatalogRequest& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _Database__isset {
+  _Database__isset() : name(false), description(false), locationUri(false), parameters(false), privileges(false), ownerName(false), ownerType(false), catalogName(false) {}
+  bool name :1;
+  bool description :1;
+  bool locationUri :1;
+  bool parameters :1;
+  bool privileges :1;
+  bool ownerName :1;
+  bool ownerType :1;
+  bool catalogName :1;
+} _Database__isset;
+
+class Database {
+ public:
+
+  Database(const Database&);
+  Database& operator=(const Database&);
+  Database() : name(), description(), locationUri(), ownerName(), ownerType((PrincipalType::type)0), catalogName() {
+  }
+
+  virtual ~Database() throw();
+  std::string name;
+  std::string description;
+  std::string locationUri;
+  std::map<std::string, std::string>  parameters;
+  PrincipalPrivilegeSet privileges;
+  std::string ownerName;
+  PrincipalType::type ownerType;
+  std::string catalogName;
+
+  _Database__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  void __set_description(const std::string& val);
+
+  void __set_locationUri(const std::string& val);
+
+  void __set_parameters(const std::map<std::string, std::string> & val);
+
+  void __set_privileges(const PrincipalPrivilegeSet& val);
+
+  void __set_ownerName(const std::string& val);
+
+  void __set_ownerType(const PrincipalType::type val);
+
+  void __set_catalogName(const std::string& val);
+
+  bool operator == (const Database & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    if (!(description == rhs.description))
+      return false;
+    if (!(locationUri == rhs.locationUri))
+      return false;
+    if (!(parameters == rhs.parameters))
+      return false;
+    if (__isset.privileges != rhs.__isset.privileges)
+      return false;
+    else if (__isset.privileges && !(privileges == rhs.privileges))
+      return false;
+    if (__isset.ownerName != rhs.__isset.ownerName)
+      return false;
+    else if (__isset.ownerName && !(ownerName == rhs.ownerName))
+      return false;
+    if (__isset.ownerType != rhs.__isset.ownerType)
+      return false;
+    else if (__isset.ownerType && !(ownerType == rhs.ownerType))
+      return false;
+    if (__isset.catalogName != rhs.__isset.catalogName)
+      return false;
+    else if (__isset.catalogName && !(catalogName == rhs.catalogName))
+      return false;
+    return true;
+  }
+  bool operator != (const Database &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const Database & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(Database &a, Database &b);
+
+inline std::ostream& operator<<(std::ostream& out, const Database& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SerDeInfo__isset {
+  _SerDeInfo__isset() : name(false), serializationLib(false), parameters(false), description(false), serializerClass(false), deserializerClass(false), serdeType(false) {}
+  bool name :1;
+  bool serializationLib :1;
+  bool parameters :1;
+  bool description :1;
+  bool serializerClass :1;
+  bool deserializerClass :1;
+  bool serdeType :1;
+} _SerDeInfo__isset;
+
+class SerDeInfo {
+ public:
+
+  SerDeInfo(const SerDeInfo&);
+  SerDeInfo& operator=(const SerDeInfo&);
+  SerDeInfo() : name(), serializationLib(), description(), serializerClass(), deserializerClass(), serdeType((SerdeType::type)0) {
+  }
+
+  virtual ~SerDeInfo() throw();
+  std::string name;
+  std::string serializationLib;
+  std::map<std::string, std::string>  parameters;
+  std::string description;
+  std::string serializerClass;
+  std::string deserializerClass;
+  SerdeType::type serdeType;
+
+  _SerDeInfo__isset __isset;
+
+  void __set_name(const std::string& val);
+
+  void __set_serializationLib(const std::string& val);
+
+  void __set_parameters(const std::map<std::string, std::string> & val);
+
+  void __set_description(const std::string& val);
+
+  void __set_serializerClass(const std::string& val);
+
+  void __set_deserializerClass(const std::string& val);
+
+  void __set_serdeType(const SerdeType::type val);
+
+  bool operator == (const SerDeInfo & rhs) const
+  {
+    if (!(name == rhs.name))
+      return false;
+    if (!(serializationLib == rhs.serializationLib))
+      return false;
+    if (!(parameters == rhs.parameters))
+      return false;
+    if (__isset.description != rhs.__isset.description)
+      return false;
+    else if (__isset.description && !(description == rhs.description))
+      return false;
+    if (__isset.serializerClass != rhs.__isset.serializerClass)
+      return false;
+    else if (__isset.serializerClass && !(serializerClass == rhs.serializerClass))
+      return false;
+    if (__isset.deserializerClass != rhs.__isset.deserializerClass)
+      return false;
+    else if (__isset.deserializerClass && !(deserializerClass == rhs.deserializerClass))
+      return false;
+    if (__isset.serdeType != rhs.__isset.serdeType)
+      return false;
+    else if (__isset.serdeType && !(serdeType == rhs.serdeType))
+      return false;
+    return true;
+  }
+  bool operator != (const SerDeInfo &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SerDeInfo & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SerDeInfo &a, SerDeInfo &b);
+
+inline std::ostream& operator<<(std::ostream& out, const SerDeInfo& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _Order__isset {
+  _Order__isset() : col(false), order(false) {}
+  bool col :1;
+  bool order :1;
+} _Order__isset;
+
+class Order {
+ public:
+
+  Order(const Order&);
+  Order& operator=(const Order&);
+  Order() : col(), order(0) {
+  }
+
+  virtual ~Order() throw();
+  std::string col;
+  int32_t order;
+
+  _Order__isset __isset;
+
+  void __set_col(const std::string& val);
+
+  void __set_order(const int32_t val);
+
+  bool operator == (const Order & rhs) const
+  {
+    if (!(col == rhs.col))
+      return false;
+    if (!(order == rhs.order))
+      return false;
+    return true;
+  }
+  bool operator != (const Order &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const Order & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(Order &a, Order &b);
+
+inline std::ostream& operator<<(std::ostream& out, const Order& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SkewedInfo__isset {
+  _SkewedInfo__isset() : skewedColNames(false), skewedColValues(false), skewedColValueLocationMaps(false) {}
+  bool skewedColNames :1;
+  bool skewedColValues :1;
+  bool skewedColValueLocationMaps :1;
+} _SkewedInfo__isset;
+
+class SkewedInfo {
+ public:
+
+  SkewedInfo(const SkewedInfo&);
+  SkewedInfo& operator=(const SkewedInfo&);
+  SkewedInfo() {
+  }
+
+  virtual ~SkewedInfo() throw();
+  std::vector<std::string>  skewedColNames;
+  std::vector<std::vector<std::string> >  skewedColValues;
+  std::map<std::vector<std::string> , std::string>  skewedColValueLocationMaps;
+
+  _SkewedInfo__isset __isset;
+
+  void __set_skewedColNames(const std::vector<std::string> & val);
+
+  void __set_skewedColValues(const std::vector<std::vector<std::string> > & val);
+
+  void __set_skewedColValueLocationMaps(const std::map<std::vector<std::string> , std::string> & val);
+
+  bool operator == (const SkewedInfo & rhs) const
+  {
+    if (!(skewedColNames == rhs.skewedColNames))
+      return false;
+    if (!(skewedColValues == rhs.skewedColValues))
+      return false;
+    if (!(skewedColValueLocationMaps == rhs.skewedColValueLocationMaps))
+      return false;
+    return true;
+  }
+  bool operator != (const SkewedInfo &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SkewedInfo & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SkewedInfo &a, SkewedInfo &b);
+
+inline std::ostream& operator<<(std::ostream& out, const SkewedInfo& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _StorageDescriptor__isset {
+  _StorageDescriptor__isset() : cols(false), location(false), inputFormat(false), outputFormat(false), compressed(false), numBuckets(false), serdeInfo(false), bucketCols(false), sortCols(false), parameters(false), skewedInfo(false), storedAsSubDirectories(false) {}
+  bool cols :1;
+  bool location :1;
+  bool inputFormat :1;
+  bool outputFormat :1;
+  bool compressed :1;
+  bool numBuckets :1;
+  bool serdeInfo :1;
+  bool bucketCols :1;
+  bool sortCols :1;
+  bool parameters :1;
+  bool skewedInfo :1;
+  bool storedAsSubDirectories :1;
+} _StorageDescriptor__isset;
+
+class StorageDescriptor {
+ public:
+
+  StorageDescriptor(const StorageDescriptor&);
+  StorageDescriptor& operator=(const StorageDescriptor&);
+  StorageDescriptor() : location(), inputFormat(), outputFormat(), compressed(0), numBuckets(0), storedAsSubDirectories(0) {
+  }
+
+  virtual ~StorageDescriptor() throw();
+  std::vector<FieldSchema>  cols;
+  std::string location;
+  std::string inputFormat;
+  std::string outputFormat;
+  bool compressed;
+  int32_t numBuckets;
+  SerDeInfo serdeInfo;
+  std::vector<std::string>  bucketCols;
+  std::vector<Order>  sortCols;
+  std::map<std::string, std::string>  parameters;
+  SkewedInfo skewedInfo;
+  bool storedAsSubDirectories;
+
+  _StorageDescriptor__isset __isset;
+
+  void __set_cols(const std::vector<FieldSchema> & val);
+
+  void __set_location(const std::string& val);
+
+  void __set_inputFormat(const std::string& val);
+
+  void __set_outputFormat(const std::string& val);
+
+  void __set_compressed(const bool val);
+
+  void __set_numBuckets(const int32_t val);
+
+  void __set_serdeInfo(const SerDeInfo& val);
+
+  void __set_bucketCols(const std::vector<std::string> & val);
+
+  void __set_sortCols(const std::vector<Order> & val);
+
+  void __set_parameters(const std::map<std::string, std::string> & val);
+
+  void __set_skewedInfo(const SkewedInfo& val);
+
+  void __set_storedAsSubDirectories(const bool val);
+
+  bool operator == (const StorageDescriptor & rhs) const
+  {
+    if (!(cols == rhs.cols))
+      return false;
+    if (!(location == rhs.location))
+      return false;
+    if (!(inputFormat == rhs.inputFormat))
+      return false;
+    if (!(outputFormat == rhs.outputFormat))
+      return false;
+    if (!(compressed == rhs.compressed))
+      return false;
+    if (!(numBuckets == rhs.numBuckets))
+      return false;
+    if (!(serdeInfo == rhs.serdeInfo))
+      return false;
+    if (!(bucketCols == rhs.bucketCols))
+      return false;
+    if (!(sortCols == rhs.sortCols))
+      return false;
+    if (!(parameters == rhs.parameters))
+      return false;
+    if (__isset.skewedInfo != rhs.__isset.skewedInfo)
+      return false;
+    else if (__isset.skewedInfo && !(skewedInfo == rhs.skewedInfo))
+      return false;
+    if (__isset.storedAsSubDirectories != rhs.__isset.storedAsSubDirectories)
+      return false;
+    else if (__isset.storedAsSubDirectories && !(storedAsSubDirectories == rhs.storedAsSubDirectories))
+      return false;
+    return true;
+  }
+  bool operator != (const StorageDescriptor &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const StorageDescriptor & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(StorageDescriptor &a, StorageDescriptor &b);
+
+inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _Table__isset {
+  _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), rewriteEnabled(false), creationMetadata(false), catName(false), ownerType(true) {}
+  bool tableName :1;
+  bool dbName :1;
+  bool owner :1;
+  bool createTime :1;
+  bool lastAccessTime :1;
+  bool retention :1;
+  bool sd :1;
+  bool partitionKeys :1;
+  bool parameters :1;
+  bool viewOriginalText :1;
+  bool viewExpandedText :1;
+  bool tableType :1;
+  bool privileges :1;
+  bool temporary :1;
+  bool rewriteEnabled :1;
+  bool creationMetadata :1;
+  bool catName :1;
+  bool ownerType :1;
+} _Table__isset;
+
+class Table {
+ public:
+
+  Table(const Table&);
+  Table& operator=(const Table&);
+  Table() : tableName(), dbName(), owner(), createTime(0), lastAccessTime(0), retention(0), viewOriginalText(), viewExpandedText(), tableType(), temporary(false), rewriteEnabled(false), catName(), ownerType((PrincipalType::type)1) {
+  }
+
+  virtual ~Table() throw();
+  std::string tableName;
+  std::string dbName;
+  std::string owner;
+  int32_t createTime;
+  int32_t lastAccessTime;
+  int32_t retention;
+  StorageDescriptor sd;
+  std::vector<FieldSchema>  partitionKeys;
+  std::map<std::string, std::string>  parameters;
+  std::string viewOriginalText;
+  std::string viewExpandedText;
+  std::string tableType;
+  PrincipalPrivilegeSet privileges;
+  bool temporary;
+  bool rewriteEnabled;
+  CreationMetadata creationMetadata;
+  std::string catName;
+  PrincipalType::type ownerType;
+
+  _Table__isset __isset;
+
+  void __set_tableName(const std::string& val);
+
+  void __set_dbName(const std::string& val);
+
+  void __set_owner(const std::string& val);
+
+  void __set_createTime(const int32_t val);
+
+  void __set_lastAccessTime(const int32_t val);
+
+  void __set_retention(const int32_t val);
+
+  void __set_sd(const StorageDescriptor& val);
+
+  void __set_partitionKeys(const std::vector<FieldSchema> & val);
+
+  void __set_parameters(const std::map<std::string, std::string> & val);
+
+  void __set_viewOriginalText(const std::string& val);
+
+  void __set_viewExpandedText(const std::string& val);
+
+  void __set_tableType(const std::string& val);
+
+  void __set_privileges(const PrincipalPrivilegeSet& val);
+
+  void __set_temporary(const bool val);
+
+  void __set_rewriteEnabled(const bool val);
+
+  void __set_creationMetadata(const CreationMetadata& val);
+
+  void __set_catName(const std::string& val);
+
+  void __set_ownerType(const PrincipalType::type val);
+
+  bool operator == (const Table & rhs) const
+  {
+    if (!(tableName == rhs.tableName))
+      return false;
+    if (!(dbName == rhs.dbName))
+      return false;
+    if (!(owner == rhs.owner))
+      return false;
+    if (!(createTime == rhs.createTime))
+      return false;
+    if (!(lastAccessTime == rhs.lastAccessTime))
+      return false;
+    if (!(retention == rhs.retention))
+      return false;
+    if (!(sd == rhs.sd))
+      return false;
+    if (!(partitionKeys == rhs.partitionKeys))
+      return false;
+    if (!(parameters == rhs.parameters))
+      return false;
+    if (!(viewOriginalText == rhs.viewOriginalText))
+      return false;
+    if (!(viewExpandedText == rhs.viewExpandedText))
+      return false;
+    if (!(tableType == rhs.tableType))
+      return false;
+    if (__isset.privileges != rhs.__isset.privileges)
+      return false;
+    else if (__isset.privileges && !(privileges == rhs.privileges))
+      return false;
+    if (__isset.temporary != rhs.__isset.temporary)
+      return false;
+    else if (__isset.temporary && !(temporary == rhs.temporary))
+      return false;
+    if (__isset.rewriteEnabled != rhs.__isset.rewriteEnabled)
+      return false;
+    else if (__isset.rewriteEnabled && !(rewriteEnabled == rhs.rewriteEnabled))
+      return false;
+    if (__isset.creationMetadata != rhs.__isset.creationMetadata)
+      return false;
+    else if (__isset.creationMetadata && !(creationMetadata == rhs.creationMetadata))
+      return false;
+    if (__isset.catName != rhs.__isset.catName)
+      return false;
+    else if (__isset.catName && !(catName == rhs.catName))
+      return false;
+    if (__isset.ownerType != rhs.__isset.ownerType)
+      return false;
+    else if (__isset.ownerType && !(ownerType == rhs.ownerType))
+      return false;
+    return true;
+  }
+  bool operator != (const Table &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const Table & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(Table &a, Table &b);
+
+inline std::ostream& operator<<(std::ostream& out, const Table& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _Partition__isset {
+  _Partition__isset() : values(false), dbName(false), tableName(false), createTime(false), lastAccessTime(false), sd(false), parameters(false), privileges(false), catName(false) {}
+  bool values :1;
+  bool dbName :1;
+  bool tableName :1;
+  bool createTime :1;
+  bool lastAccessTime :1;
+  bool sd :1;
+  bool parameters :1;
+  bool privileges :1;
+  bool catName :1;
+} _Partition__isset;
+
+class Partition {
+ public:
+
+  Partition(const Partition&);
+  Partition& operator=(const Partition&);
+  Partition() : dbName(), tableName(), createTime(0), lastAccessTime(0), catName() {
+  }
+
+  virtual ~Partition() throw();
+  std::vector<std::string>  values;
+  std::string dbName;
+  std::string tableName;
+  int32_t createTime;
+  int32_t lastAccessTime;
+  StorageDescriptor sd;
+  std::map<std::string, std::string>  parameters;
+  PrincipalPrivilegeSet privileges;
+  std::string catName;
+
+  _Partition__isset __isset;
+
+  void __set_values(const std::vector<std::string> & val);
+
+  void __set_dbName(const std::string& val);
+
+  void __set_tableName(const std::string& val);
+
+  void __set_createTime(const int32_t val);
+
+  void __set_lastAccessTime(const int32_t val);
+
+  void __set_sd(const StorageDescriptor& val);
+
+  void __set_parameters(const std::map<std::string, std::string> & val);
+
+  void __set_privileges(const PrincipalPrivilegeSet& val);
+
+  void __set_catName(const std::string& val);
+
+  bool operator == (const Partition & rhs) const
+  {
+    if (!(values == rhs.values))
+      return false;
+    if (!(dbName == rhs.dbName))
+      return false;
+    if (!(tableName == rhs.tableName))
+      return false;
+    if (!(createTime == rhs.createTime))
+      return false;
+    if (!(lastAccessTime == rhs.lastAccessTime))
+      return false;
+    if (!(sd == rhs.sd))
+      return false;
+    if (!(parameters == rhs.parameters))
+      return false;
+    if (__isset.privileges != rhs.__isset.privileges)
+      return false;
+    else if (__isset.privileges && !(privileges == rhs.privileges))
+      return false;
+    if (__isset.catName != rhs.__isset.catName)
+      return false;
+    else if (__isset.catName && !(catName == rhs.catName))
+      return false;
+    return true;
+  }
+  bool operator != (const Partition &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const Partition & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(Partition &a, Partition &b);
+
+inline std::ostream& operator<<(std::ostream& out, const Partition& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _PartitionWithoutSD__isset {
+  _PartitionWithoutSD__isset() : values(false), createTime(false), lastAccessTime(false), relativePath(false), parameters(false), privileges(false) {}
+  bool values :1;
+  bool createTime :1;
+  bool lastAccessTime :1;
+  bool relativePath :1;
+  bool parameters :1;
+  bool privileges :1;
+} _PartitionWithoutSD__isset;
+
+class PartitionWithoutSD {
+ public:
+
+  PartitionWithoutSD(const PartitionWithoutSD&);
+  PartitionWithoutSD& operator=(const PartitionWithoutSD&);
+  PartitionWithoutSD() : createTime(0), lastAccessTime(0), relativePath() {
+  }
+
+  virtual ~PartitionWithoutSD() throw();
+  std::vector<std::string>  values;
+  int32_t createTime;
+  int32_t lastAccessTime;
+  std::string relativePath;
+  std::map<std::string, std::string>  parameters;
+  PrincipalPrivilegeSet privileges;
+
+  _PartitionWithoutSD__isset __isset;
+
+  void __set_values(const std::vector<std::string> & val);
+
+  void __set_createTime(const int32_t val);
+
+  void __set_lastAccessTime(const int32_t val);
+
+  void __set_relativePath(const std::string& val

<TRUNCATED>

[88/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
index 0000000,3c88d8f..821049e
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableRequest.java
@@@ -1,0 -1,711 +1,922 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetTableRequest implements org.apache.thrift.TBase<GetTableRequest, GetTableRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetTableRequest> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableRequest");
+ 
+   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+   private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+   private static final org.apache.thrift.protocol.TField CAPABILITIES_FIELD_DESC = new org.apache.thrift.protocol.TField("capabilities", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)4);
++  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)5);
++  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)6);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new GetTableRequestStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new GetTableRequestTupleSchemeFactory());
+   }
+ 
+   private String dbName; // required
+   private String tblName; // required
+   private ClientCapabilities capabilities; // optional
+   private String catName; // optional
++  private long txnId; // optional
++  private String validWriteIdList; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     DB_NAME((short)1, "dbName"),
+     TBL_NAME((short)2, "tblName"),
+     CAPABILITIES((short)3, "capabilities"),
 -    CAT_NAME((short)4, "catName");
++    CAT_NAME((short)4, "catName"),
++    TXN_ID((short)5, "txnId"),
++    VALID_WRITE_ID_LIST((short)6, "validWriteIdList");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // DB_NAME
+           return DB_NAME;
+         case 2: // TBL_NAME
+           return TBL_NAME;
+         case 3: // CAPABILITIES
+           return CAPABILITIES;
+         case 4: // CAT_NAME
+           return CAT_NAME;
++        case 5: // TXN_ID
++          return TXN_ID;
++        case 6: // VALID_WRITE_ID_LIST
++          return VALID_WRITE_ID_LIST;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
 -  private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME};
++  private static final int __TXNID_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.CAPABILITIES,_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.CAPABILITIES, new org.apache.thrift.meta_data.FieldMetaData("capabilities", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClientCapabilities.class)));
+     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableRequest.class, metaDataMap);
+   }
+ 
+   public GetTableRequest() {
++    this.txnId = -1L;
++
+   }
+ 
+   public GetTableRequest(
+     String dbName,
+     String tblName)
+   {
+     this();
+     this.dbName = dbName;
+     this.tblName = tblName;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public GetTableRequest(GetTableRequest other) {
++    __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetDbName()) {
+       this.dbName = other.dbName;
+     }
+     if (other.isSetTblName()) {
+       this.tblName = other.tblName;
+     }
+     if (other.isSetCapabilities()) {
+       this.capabilities = new ClientCapabilities(other.capabilities);
+     }
+     if (other.isSetCatName()) {
+       this.catName = other.catName;
+     }
++    this.txnId = other.txnId;
++    if (other.isSetValidWriteIdList()) {
++      this.validWriteIdList = other.validWriteIdList;
++    }
+   }
+ 
+   public GetTableRequest deepCopy() {
+     return new GetTableRequest(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.dbName = null;
+     this.tblName = null;
+     this.capabilities = null;
+     this.catName = null;
++    this.txnId = -1L;
++
++    this.validWriteIdList = null;
+   }
+ 
+   public String getDbName() {
+     return this.dbName;
+   }
+ 
+   public void setDbName(String dbName) {
+     this.dbName = dbName;
+   }
+ 
+   public void unsetDbName() {
+     this.dbName = null;
+   }
+ 
+   /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+   public boolean isSetDbName() {
+     return this.dbName != null;
+   }
+ 
+   public void setDbNameIsSet(boolean value) {
+     if (!value) {
+       this.dbName = null;
+     }
+   }
+ 
+   public String getTblName() {
+     return this.tblName;
+   }
+ 
+   public void setTblName(String tblName) {
+     this.tblName = tblName;
+   }
+ 
+   public void unsetTblName() {
+     this.tblName = null;
+   }
+ 
+   /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+   public boolean isSetTblName() {
+     return this.tblName != null;
+   }
+ 
+   public void setTblNameIsSet(boolean value) {
+     if (!value) {
+       this.tblName = null;
+     }
+   }
+ 
+   public ClientCapabilities getCapabilities() {
+     return this.capabilities;
+   }
+ 
+   public void setCapabilities(ClientCapabilities capabilities) {
+     this.capabilities = capabilities;
+   }
+ 
+   public void unsetCapabilities() {
+     this.capabilities = null;
+   }
+ 
+   /** Returns true if field capabilities is set (has been assigned a value) and false otherwise */
+   public boolean isSetCapabilities() {
+     return this.capabilities != null;
+   }
+ 
+   public void setCapabilitiesIsSet(boolean value) {
+     if (!value) {
+       this.capabilities = null;
+     }
+   }
+ 
+   public String getCatName() {
+     return this.catName;
+   }
+ 
+   public void setCatName(String catName) {
+     this.catName = catName;
+   }
+ 
+   public void unsetCatName() {
+     this.catName = null;
+   }
+ 
+   /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+   public boolean isSetCatName() {
+     return this.catName != null;
+   }
+ 
+   public void setCatNameIsSet(boolean value) {
+     if (!value) {
+       this.catName = null;
+     }
+   }
+ 
++  public long getTxnId() {
++    return this.txnId;
++  }
++
++  public void setTxnId(long txnId) {
++    this.txnId = txnId;
++    setTxnIdIsSet(true);
++  }
++
++  public void unsetTxnId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
++  public boolean isSetTxnId() {
++    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  public void setTxnIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
++  }
++
++  public String getValidWriteIdList() {
++    return this.validWriteIdList;
++  }
++
++  public void setValidWriteIdList(String validWriteIdList) {
++    this.validWriteIdList = validWriteIdList;
++  }
++
++  public void unsetValidWriteIdList() {
++    this.validWriteIdList = null;
++  }
++
++  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
++  public boolean isSetValidWriteIdList() {
++    return this.validWriteIdList != null;
++  }
++
++  public void setValidWriteIdListIsSet(boolean value) {
++    if (!value) {
++      this.validWriteIdList = null;
++    }
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case DB_NAME:
+       if (value == null) {
+         unsetDbName();
+       } else {
+         setDbName((String)value);
+       }
+       break;
+ 
+     case TBL_NAME:
+       if (value == null) {
+         unsetTblName();
+       } else {
+         setTblName((String)value);
+       }
+       break;
+ 
+     case CAPABILITIES:
+       if (value == null) {
+         unsetCapabilities();
+       } else {
+         setCapabilities((ClientCapabilities)value);
+       }
+       break;
+ 
+     case CAT_NAME:
+       if (value == null) {
+         unsetCatName();
+       } else {
+         setCatName((String)value);
+       }
+       break;
+ 
++    case TXN_ID:
++      if (value == null) {
++        unsetTxnId();
++      } else {
++        setTxnId((Long)value);
++      }
++      break;
++
++    case VALID_WRITE_ID_LIST:
++      if (value == null) {
++        unsetValidWriteIdList();
++      } else {
++        setValidWriteIdList((String)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case DB_NAME:
+       return getDbName();
+ 
+     case TBL_NAME:
+       return getTblName();
+ 
+     case CAPABILITIES:
+       return getCapabilities();
+ 
+     case CAT_NAME:
+       return getCatName();
+ 
++    case TXN_ID:
++      return getTxnId();
++
++    case VALID_WRITE_ID_LIST:
++      return getValidWriteIdList();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case DB_NAME:
+       return isSetDbName();
+     case TBL_NAME:
+       return isSetTblName();
+     case CAPABILITIES:
+       return isSetCapabilities();
+     case CAT_NAME:
+       return isSetCatName();
++    case TXN_ID:
++      return isSetTxnId();
++    case VALID_WRITE_ID_LIST:
++      return isSetValidWriteIdList();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof GetTableRequest)
+       return this.equals((GetTableRequest)that);
+     return false;
+   }
+ 
+   public boolean equals(GetTableRequest that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_dbName = true && this.isSetDbName();
+     boolean that_present_dbName = true && that.isSetDbName();
+     if (this_present_dbName || that_present_dbName) {
+       if (!(this_present_dbName && that_present_dbName))
+         return false;
+       if (!this.dbName.equals(that.dbName))
+         return false;
+     }
+ 
+     boolean this_present_tblName = true && this.isSetTblName();
+     boolean that_present_tblName = true && that.isSetTblName();
+     if (this_present_tblName || that_present_tblName) {
+       if (!(this_present_tblName && that_present_tblName))
+         return false;
+       if (!this.tblName.equals(that.tblName))
+         return false;
+     }
+ 
+     boolean this_present_capabilities = true && this.isSetCapabilities();
+     boolean that_present_capabilities = true && that.isSetCapabilities();
+     if (this_present_capabilities || that_present_capabilities) {
+       if (!(this_present_capabilities && that_present_capabilities))
+         return false;
+       if (!this.capabilities.equals(that.capabilities))
+         return false;
+     }
+ 
+     boolean this_present_catName = true && this.isSetCatName();
+     boolean that_present_catName = true && that.isSetCatName();
+     if (this_present_catName || that_present_catName) {
+       if (!(this_present_catName && that_present_catName))
+         return false;
+       if (!this.catName.equals(that.catName))
+         return false;
+     }
+ 
++    boolean this_present_txnId = true && this.isSetTxnId();
++    boolean that_present_txnId = true && that.isSetTxnId();
++    if (this_present_txnId || that_present_txnId) {
++      if (!(this_present_txnId && that_present_txnId))
++        return false;
++      if (this.txnId != that.txnId)
++        return false;
++    }
++
++    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
++    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
++    if (this_present_validWriteIdList || that_present_validWriteIdList) {
++      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
++        return false;
++      if (!this.validWriteIdList.equals(that.validWriteIdList))
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_dbName = true && (isSetDbName());
+     list.add(present_dbName);
+     if (present_dbName)
+       list.add(dbName);
+ 
+     boolean present_tblName = true && (isSetTblName());
+     list.add(present_tblName);
+     if (present_tblName)
+       list.add(tblName);
+ 
+     boolean present_capabilities = true && (isSetCapabilities());
+     list.add(present_capabilities);
+     if (present_capabilities)
+       list.add(capabilities);
+ 
+     boolean present_catName = true && (isSetCatName());
+     list.add(present_catName);
+     if (present_catName)
+       list.add(catName);
+ 
++    boolean present_txnId = true && (isSetTxnId());
++    list.add(present_txnId);
++    if (present_txnId)
++      list.add(txnId);
++
++    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
++    list.add(present_validWriteIdList);
++    if (present_validWriteIdList)
++      list.add(validWriteIdList);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(GetTableRequest other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetDbName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTblName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCapabilities()).compareTo(other.isSetCapabilities());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCapabilities()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.capabilities, other.capabilities);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCatName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTxnId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetValidWriteIdList()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("GetTableRequest(");
+     boolean first = true;
+ 
+     sb.append("dbName:");
+     if (this.dbName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.dbName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("tblName:");
+     if (this.tblName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.tblName);
+     }
+     first = false;
+     if (isSetCapabilities()) {
+       if (!first) sb.append(", ");
+       sb.append("capabilities:");
+       if (this.capabilities == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.capabilities);
+       }
+       first = false;
+     }
+     if (isSetCatName()) {
+       if (!first) sb.append(", ");
+       sb.append("catName:");
+       if (this.catName == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.catName);
+       }
+       first = false;
+     }
++    if (isSetTxnId()) {
++      if (!first) sb.append(", ");
++      sb.append("txnId:");
++      sb.append(this.txnId);
++      first = false;
++    }
++    if (isSetValidWriteIdList()) {
++      if (!first) sb.append(", ");
++      sb.append("validWriteIdList:");
++      if (this.validWriteIdList == null) {
++        sb.append("null");
++      } else {
++        sb.append(this.validWriteIdList);
++      }
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetDbName()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetTblName()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+     if (capabilities != null) {
+       capabilities.validate();
+     }
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
++      // Java deserialization does not invoke the default constructor, so reset the isset bitfield explicitly before reading.
++      __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class GetTableRequestStandardSchemeFactory implements SchemeFactory {
+     public GetTableRequestStandardScheme getScheme() {
+       return new GetTableRequestStandardScheme();
+     }
+   }
+ 
+   private static class GetTableRequestStandardScheme extends StandardScheme<GetTableRequest> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableRequest struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // DB_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.dbName = iprot.readString();
+               struct.setDbNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // TBL_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.tblName = iprot.readString();
+               struct.setTblNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 3: // CAPABILITIES
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.capabilities = new ClientCapabilities();
+               struct.capabilities.read(iprot);
+               struct.setCapabilitiesIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 4: // CAT_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.catName = iprot.readString();
+               struct.setCatNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 5: // TXN_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.txnId = iprot.readI64();
++              struct.setTxnIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 6: // VALID_WRITE_ID_LIST
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.validWriteIdList = iprot.readString();
++              struct.setValidWriteIdListIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableRequest struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.dbName != null) {
+         oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+         oprot.writeString(struct.dbName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.tblName != null) {
+         oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+         oprot.writeString(struct.tblName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.capabilities != null) {
+         if (struct.isSetCapabilities()) {
+           oprot.writeFieldBegin(CAPABILITIES_FIELD_DESC);
+           struct.capabilities.write(oprot);
+           oprot.writeFieldEnd();
+         }
+       }
+       if (struct.catName != null) {
+         if (struct.isSetCatName()) {
+           oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+           oprot.writeString(struct.catName);
+           oprot.writeFieldEnd();
+         }
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
++        oprot.writeI64(struct.txnId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.validWriteIdList != null) {
++        if (struct.isSetValidWriteIdList()) {
++          oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
++          oprot.writeString(struct.validWriteIdList);
++          oprot.writeFieldEnd();
++        }
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class GetTableRequestTupleSchemeFactory implements SchemeFactory {
+     public GetTableRequestTupleScheme getScheme() {
+       return new GetTableRequestTupleScheme();
+     }
+   }
+ 
+   private static class GetTableRequestTupleScheme extends TupleScheme<GetTableRequest> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, GetTableRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       oprot.writeString(struct.dbName);
+       oprot.writeString(struct.tblName);
+       BitSet optionals = new BitSet();
+       if (struct.isSetCapabilities()) {
+         optionals.set(0);
+       }
+       if (struct.isSetCatName()) {
+         optionals.set(1);
+       }
 -      oprot.writeBitSet(optionals, 2);
++      if (struct.isSetTxnId()) {
++        optionals.set(2);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        optionals.set(3);
++      }
++      oprot.writeBitSet(optionals, 4);
+       if (struct.isSetCapabilities()) {
+         struct.capabilities.write(oprot);
+       }
+       if (struct.isSetCatName()) {
+         oprot.writeString(struct.catName);
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeI64(struct.txnId);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        oprot.writeString(struct.validWriteIdList);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, GetTableRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       struct.dbName = iprot.readString();
+       struct.setDbNameIsSet(true);
+       struct.tblName = iprot.readString();
+       struct.setTblNameIsSet(true);
 -      BitSet incoming = iprot.readBitSet(2);
++      BitSet incoming = iprot.readBitSet(4);
+       if (incoming.get(0)) {
+         struct.capabilities = new ClientCapabilities();
+         struct.capabilities.read(iprot);
+         struct.setCapabilitiesIsSet(true);
+       }
+       if (incoming.get(1)) {
+         struct.catName = iprot.readString();
+         struct.setCatNameIsSet(true);
+       }
++      if (incoming.get(2)) {
++        struct.txnId = iprot.readI64();
++        struct.setTxnIdIsSet(true);
++      }
++      if (incoming.get(3)) {
++        struct.validWriteIdList = iprot.readString();
++        struct.setValidWriteIdListIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
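
To make the two optional fields added above concrete, here is a minimal,
hypothetical usage sketch against the generated class; the database/table
names and the write-id list string are invented values, and only constructors
and methods visible in the diff are used:

    import org.apache.hadoop.hive.metastore.api.GetTableRequest;

    public class GetTableRequestDemo {
      public static void main(String[] args) {
        GetTableRequest req = new GetTableRequest("default", "sales");

        // Optional fields start out unset, even though txnId defaults to -1L.
        System.out.println(req.isSetTxnId());            // false
        System.out.println(req.isSetValidWriteIdList()); // false

        req.setTxnId(42L);                               // also flips the isset bit
        req.setValidWriteIdList("sales:5:5::");          // made-up write-id list string

        // equals() treats optional fields as significant only when set, so a
        // request without txnId is not equal to one that carries it.
        GetTableRequest bare = new GetTableRequest("default", "sales");
        System.out.println(req.equals(bare));            // false
        System.out.println(req);                         // toString() now includes both fields
      }
    }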

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
index 0000000,968e250..aa41c15
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetTableResult.java
@@@ -1,0 -1,394 +1,501 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetTableResult implements org.apache.thrift.TBase<GetTableResult, GetTableResult._Fields>, java.io.Serializable, Cloneable, Comparable<GetTableResult> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetTableResult");
+ 
+   private static final org.apache.thrift.protocol.TField TABLE_FIELD_DESC = new org.apache.thrift.protocol.TField("table", org.apache.thrift.protocol.TType.STRUCT, (short)1);
++  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)2);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new GetTableResultStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new GetTableResultTupleSchemeFactory());
+   }
+ 
+   private Table table; // required
++  private boolean isStatsCompliant; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
 -    TABLE((short)1, "table");
++    TABLE((short)1, "table"),
++    IS_STATS_COMPLIANT((short)2, "isStatsCompliant");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // TABLE
+           return TABLE;
++        case 2: // IS_STATS_COMPLIANT
++          return IS_STATS_COMPLIANT;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
++  private static final int __ISSTATSCOMPLIANT_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.IS_STATS_COMPLIANT};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.TABLE, new org.apache.thrift.meta_data.FieldMetaData("table", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Table.class)));
++    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetTableResult.class, metaDataMap);
+   }
+ 
+   public GetTableResult() {
+   }
+ 
+   public GetTableResult(
+     Table table)
+   {
+     this();
+     this.table = table;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public GetTableResult(GetTableResult other) {
++    __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetTable()) {
+       this.table = new Table(other.table);
+     }
++    this.isStatsCompliant = other.isStatsCompliant;
+   }
+ 
+   public GetTableResult deepCopy() {
+     return new GetTableResult(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.table = null;
++    setIsStatsCompliantIsSet(false);
++    this.isStatsCompliant = false;
+   }
+ 
+   public Table getTable() {
+     return this.table;
+   }
+ 
+   public void setTable(Table table) {
+     this.table = table;
+   }
+ 
+   public void unsetTable() {
+     this.table = null;
+   }
+ 
+   /** Returns true if field table is set (has been assigned a value) and false otherwise */
+   public boolean isSetTable() {
+     return this.table != null;
+   }
+ 
+   public void setTableIsSet(boolean value) {
+     if (!value) {
+       this.table = null;
+     }
+   }
+ 
++  public boolean isIsStatsCompliant() {
++    return this.isStatsCompliant;
++  }
++
++  public void setIsStatsCompliant(boolean isStatsCompliant) {
++    this.isStatsCompliant = isStatsCompliant;
++    setIsStatsCompliantIsSet(true);
++  }
++
++  public void unsetIsStatsCompliant() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
++  public boolean isSetIsStatsCompliant() {
++    return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  public void setIsStatsCompliantIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case TABLE:
+       if (value == null) {
+         unsetTable();
+       } else {
+         setTable((Table)value);
+       }
+       break;
+ 
++    case IS_STATS_COMPLIANT:
++      if (value == null) {
++        unsetIsStatsCompliant();
++      } else {
++        setIsStatsCompliant((Boolean)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case TABLE:
+       return getTable();
+ 
++    case IS_STATS_COMPLIANT:
++      return isIsStatsCompliant();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case TABLE:
+       return isSetTable();
++    case IS_STATS_COMPLIANT:
++      return isSetIsStatsCompliant();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof GetTableResult)
+       return this.equals((GetTableResult)that);
+     return false;
+   }
+ 
+   public boolean equals(GetTableResult that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_table = true && this.isSetTable();
+     boolean that_present_table = true && that.isSetTable();
+     if (this_present_table || that_present_table) {
+       if (!(this_present_table && that_present_table))
+         return false;
+       if (!this.table.equals(that.table))
+         return false;
+     }
+ 
++    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
++    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
++    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
++      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
++        return false;
++      if (this.isStatsCompliant != that.isStatsCompliant)
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_table = true && (isSetTable());
+     list.add(present_table);
+     if (present_table)
+       list.add(table);
+ 
++    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
++    list.add(present_isStatsCompliant);
++    if (present_isStatsCompliant)
++      list.add(isStatsCompliant);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(GetTableResult other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetTable()).compareTo(other.isSetTable());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTable()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table, other.table);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIsStatsCompliant()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("GetTableResult(");
+     boolean first = true;
+ 
+     sb.append("table:");
+     if (this.table == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.table);
+     }
+     first = false;
++    if (isSetIsStatsCompliant()) {
++      if (!first) sb.append(", ");
++      sb.append("isStatsCompliant:");
++      sb.append(this.isStatsCompliant);
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetTable()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'table' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+     if (table != null) {
+       table.validate();
+     }
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
++      // Java deserialization does not invoke the default constructor, so reset the isset bitfield explicitly before reading.
++      __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class GetTableResultStandardSchemeFactory implements SchemeFactory {
+     public GetTableResultStandardScheme getScheme() {
+       return new GetTableResultStandardScheme();
+     }
+   }
+ 
+   private static class GetTableResultStandardScheme extends StandardScheme<GetTableResult> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, GetTableResult struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // TABLE
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.table = new Table();
+               struct.table.read(iprot);
+               struct.setTableIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 2: // IS_STATS_COMPLIANT
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.isStatsCompliant = iprot.readBool();
++              struct.setIsStatsCompliantIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, GetTableResult struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.table != null) {
+         oprot.writeFieldBegin(TABLE_FIELD_DESC);
+         struct.table.write(oprot);
+         oprot.writeFieldEnd();
+       }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
++        oprot.writeBool(struct.isStatsCompliant);
++        oprot.writeFieldEnd();
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class GetTableResultTupleSchemeFactory implements SchemeFactory {
+     public GetTableResultTupleScheme getScheme() {
+       return new GetTableResultTupleScheme();
+     }
+   }
+ 
+   private static class GetTableResultTupleScheme extends TupleScheme<GetTableResult> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, GetTableResult struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       struct.table.write(oprot);
++      BitSet optionals = new BitSet();
++      if (struct.isSetIsStatsCompliant()) {
++        optionals.set(0);
++      }
++      oprot.writeBitSet(optionals, 1);
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeBool(struct.isStatsCompliant);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, GetTableResult struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       struct.table = new Table();
+       struct.table.read(iprot);
+       struct.setTableIsSet(true);
++      BitSet incoming = iprot.readBitSet(1);
++      if (incoming.get(0)) {
++        struct.isStatsCompliant = iprot.readBool();
++        struct.setIsStatsCompliantIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
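(The hunk above ends the generated GetTableResult bean. For orientation, a minimal
illustrative Java sketch of how the newly added optional field behaves from caller
code; it is not part of the commit, the variable tbl is assumed to be an existing
Table instance, and the setter/isset method names follow the generated pattern
visible in the diff.)

    GetTableResult res = new GetTableResult();
    res.setTable(tbl);                           // required field
    boolean known = res.isSetIsStatsCompliant(); // false: optional field not yet assigned
    res.setIsStatsCompliant(true);               // assigns the value and flips the isset bit
    // On the wire, the TupleScheme first writes a one-entry BitSet of optionals,
    // then the bool only when its bit is set; the StandardScheme writer likewise
    // guards field 2 with isSetIsStatsCompliant(). An older reader that does not
    // know field 2 falls into the default case and skips it, which is what keeps
    // the addition wire compatible.

The added __isset_bitfield reset in readObject() guards plain Java serialization: as
the generated comment notes, deserialization does not run the default constructor, so
the primitive-field bitfield must be cleared explicitly before the Thrift read
repopulates it.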


[47/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
new file mode 100644
index 0000000..789c150
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -0,0 +1,1079 @@
+// This autogenerated skeleton file illustrates how to build a server.
+// You should copy it to another filename to avoid overwriting it.
+
+#include "ThriftHiveMetastore.h"
+#include <thrift/protocol/TBinaryProtocol.h>
+#include <thrift/server/TSimpleServer.h>
+#include <thrift/transport/TServerSocket.h>
+#include <thrift/transport/TBufferTransports.h>
+
+using namespace ::apache::thrift;
+using namespace ::apache::thrift::protocol;
+using namespace ::apache::thrift::transport;
+using namespace ::apache::thrift::server;
+
+using boost::shared_ptr;
+
+using namespace  ::Apache::Hadoop::Hive;
+
+class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
+ public:
+  ThriftHiveMetastoreHandler() {
+    // Your initialization goes here
+  }
+
+  void getMetaConf(std::string& _return, const std::string& key) {
+    // Your implementation goes here
+    printf("getMetaConf\n");
+  }
+
+  void setMetaConf(const std::string& key, const std::string& value) {
+    // Your implementation goes here
+    printf("setMetaConf\n");
+  }
+
+  void create_catalog(const CreateCatalogRequest& catalog) {
+    // Your implementation goes here
+    printf("create_catalog\n");
+  }
+
+  void alter_catalog(const AlterCatalogRequest& rqst) {
+    // Your implementation goes here
+    printf("alter_catalog\n");
+  }
+
+  void get_catalog(GetCatalogResponse& _return, const GetCatalogRequest& catName) {
+    // Your implementation goes here
+    printf("get_catalog\n");
+  }
+
+  void get_catalogs(GetCatalogsResponse& _return) {
+    // Your implementation goes here
+    printf("get_catalogs\n");
+  }
+
+  void drop_catalog(const DropCatalogRequest& catName) {
+    // Your implementation goes here
+    printf("drop_catalog\n");
+  }
+
+  void create_database(const Database& database) {
+    // Your implementation goes here
+    printf("create_database\n");
+  }
+
+  void get_database(Database& _return, const std::string& name) {
+    // Your implementation goes here
+    printf("get_database\n");
+  }
+
+  void drop_database(const std::string& name, const bool deleteData, const bool cascade) {
+    // Your implementation goes here
+    printf("drop_database\n");
+  }
+
+  void get_databases(std::vector<std::string> & _return, const std::string& pattern) {
+    // Your implementation goes here
+    printf("get_databases\n");
+  }
+
+  void get_all_databases(std::vector<std::string> & _return) {
+    // Your implementation goes here
+    printf("get_all_databases\n");
+  }
+
+  void alter_database(const std::string& dbname, const Database& db) {
+    // Your implementation goes here
+    printf("alter_database\n");
+  }
+
+  void get_type(Type& _return, const std::string& name) {
+    // Your implementation goes here
+    printf("get_type\n");
+  }
+
+  bool create_type(const Type& type) {
+    // Your implementation goes here
+    printf("create_type\n");
+  }
+
+  bool drop_type(const std::string& type) {
+    // Your implementation goes here
+    printf("drop_type\n");
+  }
+
+  void get_type_all(std::map<std::string, Type> & _return, const std::string& name) {
+    // Your implementation goes here
+    printf("get_type_all\n");
+  }
+
+  void get_fields(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name) {
+    // Your implementation goes here
+    printf("get_fields\n");
+  }
+
+  void get_fields_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("get_fields_with_environment_context\n");
+  }
+
+  void get_schema(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name) {
+    // Your implementation goes here
+    printf("get_schema\n");
+  }
+
+  void get_schema_with_environment_context(std::vector<FieldSchema> & _return, const std::string& db_name, const std::string& table_name, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("get_schema_with_environment_context\n");
+  }
+
+  void create_table(const Table& tbl) {
+    // Your implementation goes here
+    printf("create_table\n");
+  }
+
+  void create_table_with_environment_context(const Table& tbl, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("create_table_with_environment_context\n");
+  }
+
+  void create_table_with_constraints(const Table& tbl, const std::vector<SQLPrimaryKey> & primaryKeys, const std::vector<SQLForeignKey> & foreignKeys, const std::vector<SQLUniqueConstraint> & uniqueConstraints, const std::vector<SQLNotNullConstraint> & notNullConstraints, const std::vector<SQLDefaultConstraint> & defaultConstraints, const std::vector<SQLCheckConstraint> & checkConstraints) {
+    // Your implementation goes here
+    printf("create_table_with_constraints\n");
+  }
+
+  void drop_constraint(const DropConstraintRequest& req) {
+    // Your implementation goes here
+    printf("drop_constraint\n");
+  }
+
+  void add_primary_key(const AddPrimaryKeyRequest& req) {
+    // Your implementation goes here
+    printf("add_primary_key\n");
+  }
+
+  void add_foreign_key(const AddForeignKeyRequest& req) {
+    // Your implementation goes here
+    printf("add_foreign_key\n");
+  }
+
+  void add_unique_constraint(const AddUniqueConstraintRequest& req) {
+    // Your implementation goes here
+    printf("add_unique_constraint\n");
+  }
+
+  void add_not_null_constraint(const AddNotNullConstraintRequest& req) {
+    // Your implementation goes here
+    printf("add_not_null_constraint\n");
+  }
+
+  void add_default_constraint(const AddDefaultConstraintRequest& req) {
+    // Your implementation goes here
+    printf("add_default_constraint\n");
+  }
+
+  void add_check_constraint(const AddCheckConstraintRequest& req) {
+    // Your implementation goes here
+    printf("add_check_constraint\n");
+  }
+
+  void drop_table(const std::string& dbname, const std::string& name, const bool deleteData) {
+    // Your implementation goes here
+    printf("drop_table\n");
+  }
+
+  void drop_table_with_environment_context(const std::string& dbname, const std::string& name, const bool deleteData, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("drop_table_with_environment_context\n");
+  }
+
+  void truncate_table(const std::string& dbName, const std::string& tableName, const std::vector<std::string> & partNames) {
+    // Your implementation goes here
+    printf("truncate_table\n");
+  }
+
+  void get_tables(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern) {
+    // Your implementation goes here
+    printf("get_tables\n");
+  }
+
+  void get_tables_by_type(std::vector<std::string> & _return, const std::string& db_name, const std::string& pattern, const std::string& tableType) {
+    // Your implementation goes here
+    printf("get_tables_by_type\n");
+  }
+
+  void get_materialized_views_for_rewriting(std::vector<std::string> & _return, const std::string& db_name) {
+    // Your implementation goes here
+    printf("get_materialized_views_for_rewriting\n");
+  }
+
+  void get_table_meta(std::vector<TableMeta> & _return, const std::string& db_patterns, const std::string& tbl_patterns, const std::vector<std::string> & tbl_types) {
+    // Your implementation goes here
+    printf("get_table_meta\n");
+  }
+
+  void get_all_tables(std::vector<std::string> & _return, const std::string& db_name) {
+    // Your implementation goes here
+    printf("get_all_tables\n");
+  }
+
+  void get_table(Table& _return, const std::string& dbname, const std::string& tbl_name) {
+    // Your implementation goes here
+    printf("get_table\n");
+  }
+
+  void get_table_objects_by_name(std::vector<Table> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) {
+    // Your implementation goes here
+    printf("get_table_objects_by_name\n");
+  }
+
+  void get_table_req(GetTableResult& _return, const GetTableRequest& req) {
+    // Your implementation goes here
+    printf("get_table_req\n");
+  }
+
+  void get_table_objects_by_name_req(GetTablesResult& _return, const GetTablesRequest& req) {
+    // Your implementation goes here
+    printf("get_table_objects_by_name_req\n");
+  }
+
+  void get_materialization_invalidation_info(std::map<std::string, Materialization> & _return, const std::string& dbname, const std::vector<std::string> & tbl_names) {
+    // Your implementation goes here
+    printf("get_materialization_invalidation_info\n");
+  }
+
+  void update_creation_metadata(const std::string& catName, const std::string& dbname, const std::string& tbl_name, const CreationMetadata& creation_metadata) {
+    // Your implementation goes here
+    printf("update_creation_metadata\n");
+  }
+
+  void get_table_names_by_filter(std::vector<std::string> & _return, const std::string& dbname, const std::string& filter, const int16_t max_tables) {
+    // Your implementation goes here
+    printf("get_table_names_by_filter\n");
+  }
+
+  void alter_table(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl) {
+    // Your implementation goes here
+    printf("alter_table\n");
+  }
+
+  void alter_table_with_environment_context(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("alter_table_with_environment_context\n");
+  }
+
+  void alter_table_with_cascade(const std::string& dbname, const std::string& tbl_name, const Table& new_tbl, const bool cascade) {
+    // Your implementation goes here
+    printf("alter_table_with_cascade\n");
+  }
+
+  void add_partition(Partition& _return, const Partition& new_part) {
+    // Your implementation goes here
+    printf("add_partition\n");
+  }
+
+  void add_partition_with_environment_context(Partition& _return, const Partition& new_part, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("add_partition_with_environment_context\n");
+  }
+
+  int32_t add_partitions(const std::vector<Partition> & new_parts) {
+    // Your implementation goes here
+    printf("add_partitions\n");
+  }
+
+  int32_t add_partitions_pspec(const std::vector<PartitionSpec> & new_parts) {
+    // Your implementation goes here
+    printf("add_partitions_pspec\n");
+  }
+
+  void append_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) {
+    // Your implementation goes here
+    printf("append_partition\n");
+  }
+
+  void add_partitions_req(AddPartitionsResult& _return, const AddPartitionsRequest& request) {
+    // Your implementation goes here
+    printf("add_partitions_req\n");
+  }
+
+  void append_partition_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("append_partition_with_environment_context\n");
+  }
+
+  void append_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) {
+    // Your implementation goes here
+    printf("append_partition_by_name\n");
+  }
+
+  void append_partition_by_name_with_environment_context(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("append_partition_by_name_with_environment_context\n");
+  }
+
+  bool drop_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData) {
+    // Your implementation goes here
+    printf("drop_partition\n");
+  }
+
+  bool drop_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const bool deleteData, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("drop_partition_with_environment_context\n");
+  }
+
+  bool drop_partition_by_name(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData) {
+    // Your implementation goes here
+    printf("drop_partition_by_name\n");
+  }
+
+  bool drop_partition_by_name_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const bool deleteData, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("drop_partition_by_name_with_environment_context\n");
+  }
+
+  void drop_partitions_req(DropPartitionsResult& _return, const DropPartitionsRequest& req) {
+    // Your implementation goes here
+    printf("drop_partitions_req\n");
+  }
+
+  void get_partition(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals) {
+    // Your implementation goes here
+    printf("get_partition\n");
+  }
+
+  void exchange_partition(Partition& _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) {
+    // Your implementation goes here
+    printf("exchange_partition\n");
+  }
+
+  void exchange_partitions(std::vector<Partition> & _return, const std::map<std::string, std::string> & partitionSpecs, const std::string& source_db, const std::string& source_table_name, const std::string& dest_db, const std::string& dest_table_name) {
+    // Your implementation goes here
+    printf("exchange_partitions\n");
+  }
+
+  void get_partition_with_auth(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const std::string& user_name, const std::vector<std::string> & group_names) {
+    // Your implementation goes here
+    printf("get_partition_with_auth\n");
+  }
+
+  void get_partition_by_name(Partition& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name) {
+    // Your implementation goes here
+    printf("get_partition_by_name\n");
+  }
+
+  void get_partitions(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) {
+    // Your implementation goes here
+    printf("get_partitions\n");
+  }
+
+  void get_partitions_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names) {
+    // Your implementation goes here
+    printf("get_partitions_with_auth\n");
+  }
+
+  void get_partitions_pspec(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const int32_t max_parts) {
+    // Your implementation goes here
+    printf("get_partitions_pspec\n");
+  }
+
+  void get_partition_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_parts) {
+    // Your implementation goes here
+    printf("get_partition_names\n");
+  }
+
+  void get_partition_values(PartitionValuesResponse& _return, const PartitionValuesRequest& request) {
+    // Your implementation goes here
+    printf("get_partition_values\n");
+  }
+
+  void get_partitions_ps(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts) {
+    // Your implementation goes here
+    printf("get_partitions_ps\n");
+  }
+
+  void get_partitions_ps_with_auth(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts, const std::string& user_name, const std::vector<std::string> & group_names) {
+    // Your implementation goes here
+    printf("get_partitions_ps_with_auth\n");
+  }
+
+  void get_partition_names_ps(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const int16_t max_parts) {
+    // Your implementation goes here
+    printf("get_partition_names_ps\n");
+  }
+
+  void get_partitions_by_filter(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int16_t max_parts) {
+    // Your implementation goes here
+    printf("get_partitions_by_filter\n");
+  }
+
+  void get_part_specs_by_filter(std::vector<PartitionSpec> & _return, const std::string& db_name, const std::string& tbl_name, const std::string& filter, const int32_t max_parts) {
+    // Your implementation goes here
+    printf("get_part_specs_by_filter\n");
+  }
+
+  void get_partitions_by_expr(PartitionsByExprResult& _return, const PartitionsByExprRequest& req) {
+    // Your implementation goes here
+    printf("get_partitions_by_expr\n");
+  }
+
+  int32_t get_num_partitions_by_filter(const std::string& db_name, const std::string& tbl_name, const std::string& filter) {
+    // Your implementation goes here
+    printf("get_num_partitions_by_filter\n");
+  }
+
+  void get_partitions_by_names(std::vector<Partition> & _return, const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & names) {
+    // Your implementation goes here
+    printf("get_partitions_by_names\n");
+  }
+
+  void alter_partition(const std::string& db_name, const std::string& tbl_name, const Partition& new_part) {
+    // Your implementation goes here
+    printf("alter_partition\n");
+  }
+
+  void alter_partitions(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts) {
+    // Your implementation goes here
+    printf("alter_partitions\n");
+  }
+
+  void alter_partitions_with_environment_context(const std::string& db_name, const std::string& tbl_name, const std::vector<Partition> & new_parts, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("alter_partitions_with_environment_context\n");
+  }
+
+  void alter_partition_with_environment_context(const std::string& db_name, const std::string& tbl_name, const Partition& new_part, const EnvironmentContext& environment_context) {
+    // Your implementation goes here
+    printf("alter_partition_with_environment_context\n");
+  }
+
+  void rename_partition(const std::string& db_name, const std::string& tbl_name, const std::vector<std::string> & part_vals, const Partition& new_part) {
+    // Your implementation goes here
+    printf("rename_partition\n");
+  }
+
+  bool partition_name_has_valid_characters(const std::vector<std::string> & part_vals, const bool throw_exception) {
+    // Your implementation goes here
+    printf("partition_name_has_valid_characters\n");
+  }
+
+  void get_config_value(std::string& _return, const std::string& name, const std::string& defaultValue) {
+    // Your implementation goes here
+    printf("get_config_value\n");
+  }
+
+  void partition_name_to_vals(std::vector<std::string> & _return, const std::string& part_name) {
+    // Your implementation goes here
+    printf("partition_name_to_vals\n");
+  }
+
+  void partition_name_to_spec(std::map<std::string, std::string> & _return, const std::string& part_name) {
+    // Your implementation goes here
+    printf("partition_name_to_spec\n");
+  }
+
+  void markPartitionForEvent(const std::string& db_name, const std::string& tbl_name, const std::map<std::string, std::string> & part_vals, const PartitionEventType::type eventType) {
+    // Your implementation goes here
+    printf("markPartitionForEvent\n");
+  }
+
+  bool isPartitionMarkedForEvent(const std::string& db_name, const std::string& tbl_name, const std::map<std::string, std::string> & part_vals, const PartitionEventType::type eventType) {
+    // Your implementation goes here
+    printf("isPartitionMarkedForEvent\n");
+  }
+
+  void get_primary_keys(PrimaryKeysResponse& _return, const PrimaryKeysRequest& request) {
+    // Your implementation goes here
+    printf("get_primary_keys\n");
+  }
+
+  void get_foreign_keys(ForeignKeysResponse& _return, const ForeignKeysRequest& request) {
+    // Your implementation goes here
+    printf("get_foreign_keys\n");
+  }
+
+  void get_unique_constraints(UniqueConstraintsResponse& _return, const UniqueConstraintsRequest& request) {
+    // Your implementation goes here
+    printf("get_unique_constraints\n");
+  }
+
+  void get_not_null_constraints(NotNullConstraintsResponse& _return, const NotNullConstraintsRequest& request) {
+    // Your implementation goes here
+    printf("get_not_null_constraints\n");
+  }
+
+  void get_default_constraints(DefaultConstraintsResponse& _return, const DefaultConstraintsRequest& request) {
+    // Your implementation goes here
+    printf("get_default_constraints\n");
+  }
+
+  void get_check_constraints(CheckConstraintsResponse& _return, const CheckConstraintsRequest& request) {
+    // Your implementation goes here
+    printf("get_check_constraints\n");
+  }
+
+  bool update_table_column_statistics(const ColumnStatistics& stats_obj) {
+    // Your implementation goes here
+    printf("update_table_column_statistics\n");
+  }
+
+  bool update_partition_column_statistics(const ColumnStatistics& stats_obj) {
+    // Your implementation goes here
+    printf("update_partition_column_statistics\n");
+  }
+
+  void get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) {
+    // Your implementation goes here
+    printf("get_table_column_statistics\n");
+  }
+
+  void get_partition_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) {
+    // Your implementation goes here
+    printf("get_partition_column_statistics\n");
+  }
+
+  void get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) {
+    // Your implementation goes here
+    printf("get_table_statistics_req\n");
+  }
+
+  void get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) {
+    // Your implementation goes here
+    printf("get_partitions_statistics_req\n");
+  }
+
+  void get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) {
+    // Your implementation goes here
+    printf("get_aggr_stats_for\n");
+  }
+
+  bool set_aggr_stats_for(const SetPartitionsStatsRequest& request) {
+    // Your implementation goes here
+    printf("set_aggr_stats_for\n");
+  }
+
+  bool delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) {
+    // Your implementation goes here
+    printf("delete_partition_column_statistics\n");
+  }
+
+  bool delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) {
+    // Your implementation goes here
+    printf("delete_table_column_statistics\n");
+  }
+
+  void create_function(const Function& func) {
+    // Your implementation goes here
+    printf("create_function\n");
+  }
+
+  void drop_function(const std::string& dbName, const std::string& funcName) {
+    // Your implementation goes here
+    printf("drop_function\n");
+  }
+
+  void alter_function(const std::string& dbName, const std::string& funcName, const Function& newFunc) {
+    // Your implementation goes here
+    printf("alter_function\n");
+  }
+
+  void get_functions(std::vector<std::string> & _return, const std::string& dbName, const std::string& pattern) {
+    // Your implementation goes here
+    printf("get_functions\n");
+  }
+
+  void get_function(Function& _return, const std::string& dbName, const std::string& funcName) {
+    // Your implementation goes here
+    printf("get_function\n");
+  }
+
+  void get_all_functions(GetAllFunctionsResponse& _return) {
+    // Your implementation goes here
+    printf("get_all_functions\n");
+  }
+
+  bool create_role(const Role& role) {
+    // Your implementation goes here
+    printf("create_role\n");
+  }
+
+  bool drop_role(const std::string& role_name) {
+    // Your implementation goes here
+    printf("drop_role\n");
+  }
+
+  void get_role_names(std::vector<std::string> & _return) {
+    // Your implementation goes here
+    printf("get_role_names\n");
+  }
+
+  bool grant_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type, const std::string& grantor, const PrincipalType::type grantorType, const bool grant_option) {
+    // Your implementation goes here
+    printf("grant_role\n");
+  }
+
+  bool revoke_role(const std::string& role_name, const std::string& principal_name, const PrincipalType::type principal_type) {
+    // Your implementation goes here
+    printf("revoke_role\n");
+  }
+
+  void list_roles(std::vector<Role> & _return, const std::string& principal_name, const PrincipalType::type principal_type) {
+    // Your implementation goes here
+    printf("list_roles\n");
+  }
+
+  void grant_revoke_role(GrantRevokeRoleResponse& _return, const GrantRevokeRoleRequest& request) {
+    // Your implementation goes here
+    printf("grant_revoke_role\n");
+  }
+
+  void get_principals_in_role(GetPrincipalsInRoleResponse& _return, const GetPrincipalsInRoleRequest& request) {
+    // Your implementation goes here
+    printf("get_principals_in_role\n");
+  }
+
+  void get_role_grants_for_principal(GetRoleGrantsForPrincipalResponse& _return, const GetRoleGrantsForPrincipalRequest& request) {
+    // Your implementation goes here
+    printf("get_role_grants_for_principal\n");
+  }
+
+  void get_privilege_set(PrincipalPrivilegeSet& _return, const HiveObjectRef& hiveObject, const std::string& user_name, const std::vector<std::string> & group_names) {
+    // Your implementation goes here
+    printf("get_privilege_set\n");
+  }
+
+  void list_privileges(std::vector<HiveObjectPrivilege> & _return, const std::string& principal_name, const PrincipalType::type principal_type, const HiveObjectRef& hiveObject) {
+    // Your implementation goes here
+    printf("list_privileges\n");
+  }
+
+  bool grant_privileges(const PrivilegeBag& privileges) {
+    // Your implementation goes here
+    printf("grant_privileges\n");
+  }
+
+  bool revoke_privileges(const PrivilegeBag& privileges) {
+    // Your implementation goes here
+    printf("revoke_privileges\n");
+  }
+
+  void grant_revoke_privileges(GrantRevokePrivilegeResponse& _return, const GrantRevokePrivilegeRequest& request) {
+    // Your implementation goes here
+    printf("grant_revoke_privileges\n");
+  }
+
+  void refresh_privileges(GrantRevokePrivilegeResponse& _return, const HiveObjectRef& objToRefresh, const std::string& authorizer, const GrantRevokePrivilegeRequest& grantRequest) {
+    // Your implementation goes here
+    printf("refresh_privileges\n");
+  }
+
+  void set_ugi(std::vector<std::string> & _return, const std::string& user_name, const std::vector<std::string> & group_names) {
+    // Your implementation goes here
+    printf("set_ugi\n");
+  }
+
+  void get_delegation_token(std::string& _return, const std::string& token_owner, const std::string& renewer_kerberos_principal_name) {
+    // Your implementation goes here
+    printf("get_delegation_token\n");
+  }
+
+  int64_t renew_delegation_token(const std::string& token_str_form) {
+    // Your implementation goes here
+    printf("renew_delegation_token\n");
+  }
+
+  void cancel_delegation_token(const std::string& token_str_form) {
+    // Your implementation goes here
+    printf("cancel_delegation_token\n");
+  }
+
+  bool add_token(const std::string& token_identifier, const std::string& delegation_token) {
+    // Your implementation goes here
+    printf("add_token\n");
+  }
+
+  bool remove_token(const std::string& token_identifier) {
+    // Your implementation goes here
+    printf("remove_token\n");
+  }
+
+  void get_token(std::string& _return, const std::string& token_identifier) {
+    // Your implementation goes here
+    printf("get_token\n");
+  }
+
+  void get_all_token_identifiers(std::vector<std::string> & _return) {
+    // Your implementation goes here
+    printf("get_all_token_identifiers\n");
+  }
+
+  int32_t add_master_key(const std::string& key) {
+    // Your implementation goes here
+    printf("add_master_key\n");
+  }
+
+  void update_master_key(const int32_t seq_number, const std::string& key) {
+    // Your implementation goes here
+    printf("update_master_key\n");
+  }
+
+  bool remove_master_key(const int32_t key_seq) {
+    // Your implementation goes here
+    printf("remove_master_key\n");
+  }
+
+  void get_master_keys(std::vector<std::string> & _return) {
+    // Your implementation goes here
+    printf("get_master_keys\n");
+  }
+
+  void get_open_txns(GetOpenTxnsResponse& _return) {
+    // Your implementation goes here
+    printf("get_open_txns\n");
+  }
+
+  void get_open_txns_info(GetOpenTxnsInfoResponse& _return) {
+    // Your implementation goes here
+    printf("get_open_txns_info\n");
+  }
+
+  void open_txns(OpenTxnsResponse& _return, const OpenTxnRequest& rqst) {
+    // Your implementation goes here
+    printf("open_txns\n");
+  }
+
+  void abort_txn(const AbortTxnRequest& rqst) {
+    // Your implementation goes here
+    printf("abort_txn\n");
+  }
+
+  void abort_txns(const AbortTxnsRequest& rqst) {
+    // Your implementation goes here
+    printf("abort_txns\n");
+  }
+
+  void commit_txn(const CommitTxnRequest& rqst) {
+    // Your implementation goes here
+    printf("commit_txn\n");
+  }
+
+  void repl_tbl_writeid_state(const ReplTblWriteIdStateRequest& rqst) {
+    // Your implementation goes here
+    printf("repl_tbl_writeid_state\n");
+  }
+
+  void get_valid_write_ids(GetValidWriteIdsResponse& _return, const GetValidWriteIdsRequest& rqst) {
+    // Your implementation goes here
+    printf("get_valid_write_ids\n");
+  }
+
+  void allocate_table_write_ids(AllocateTableWriteIdsResponse& _return, const AllocateTableWriteIdsRequest& rqst) {
+    // Your implementation goes here
+    printf("allocate_table_write_ids\n");
+  }
+
+  void lock(LockResponse& _return, const LockRequest& rqst) {
+    // Your implementation goes here
+    printf("lock\n");
+  }
+
+  void check_lock(LockResponse& _return, const CheckLockRequest& rqst) {
+    // Your implementation goes here
+    printf("check_lock\n");
+  }
+
+  void unlock(const UnlockRequest& rqst) {
+    // Your implementation goes here
+    printf("unlock\n");
+  }
+
+  void show_locks(ShowLocksResponse& _return, const ShowLocksRequest& rqst) {
+    // Your implementation goes here
+    printf("show_locks\n");
+  }
+
+  void heartbeat(const HeartbeatRequest& ids) {
+    // Your implementation goes here
+    printf("heartbeat\n");
+  }
+
+  void heartbeat_txn_range(HeartbeatTxnRangeResponse& _return, const HeartbeatTxnRangeRequest& txns) {
+    // Your implementation goes here
+    printf("heartbeat_txn_range\n");
+  }
+
+  void compact(const CompactionRequest& rqst) {
+    // Your implementation goes here
+    printf("compact\n");
+  }
+
+  void compact2(CompactionResponse& _return, const CompactionRequest& rqst) {
+    // Your implementation goes here
+    printf("compact2\n");
+  }
+
+  void show_compact(ShowCompactResponse& _return, const ShowCompactRequest& rqst) {
+    // Your implementation goes here
+    printf("show_compact\n");
+  }
+
+  void add_dynamic_partitions(const AddDynamicPartitions& rqst) {
+    // Your implementation goes here
+    printf("add_dynamic_partitions\n");
+  }
+
+  void get_next_notification(NotificationEventResponse& _return, const NotificationEventRequest& rqst) {
+    // Your implementation goes here
+    printf("get_next_notification\n");
+  }
+
+  void get_current_notificationEventId(CurrentNotificationEventId& _return) {
+    // Your implementation goes here
+    printf("get_current_notificationEventId\n");
+  }
+
+  void get_notification_events_count(NotificationEventsCountResponse& _return, const NotificationEventsCountRequest& rqst) {
+    // Your implementation goes here
+    printf("get_notification_events_count\n");
+  }
+
+  void fire_listener_event(FireEventResponse& _return, const FireEventRequest& rqst) {
+    // Your implementation goes here
+    printf("fire_listener_event\n");
+  }
+
+  void flushCache() {
+    // Your implementation goes here
+    printf("flushCache\n");
+  }
+
+  void add_write_notification_log(WriteNotificationLogResponse& _return, const WriteNotificationLogRequest& rqst) {
+    // Your implementation goes here
+    printf("add_write_notification_log\n");
+  }
+
+  void cm_recycle(CmRecycleResponse& _return, const CmRecycleRequest& request) {
+    // Your implementation goes here
+    printf("cm_recycle\n");
+  }
+
+  void get_file_metadata_by_expr(GetFileMetadataByExprResult& _return, const GetFileMetadataByExprRequest& req) {
+    // Your implementation goes here
+    printf("get_file_metadata_by_expr\n");
+  }
+
+  void get_file_metadata(GetFileMetadataResult& _return, const GetFileMetadataRequest& req) {
+    // Your implementation goes here
+    printf("get_file_metadata\n");
+  }
+
+  void put_file_metadata(PutFileMetadataResult& _return, const PutFileMetadataRequest& req) {
+    // Your implementation goes here
+    printf("put_file_metadata\n");
+  }
+
+  void clear_file_metadata(ClearFileMetadataResult& _return, const ClearFileMetadataRequest& req) {
+    // Your implementation goes here
+    printf("clear_file_metadata\n");
+  }
+
+  void cache_file_metadata(CacheFileMetadataResult& _return, const CacheFileMetadataRequest& req) {
+    // Your implementation goes here
+    printf("cache_file_metadata\n");
+  }
+
+  void get_metastore_db_uuid(std::string& _return) {
+    // Your implementation goes here
+    printf("get_metastore_db_uuid\n");
+  }
+
+  void create_resource_plan(WMCreateResourcePlanResponse& _return, const WMCreateResourcePlanRequest& request) {
+    // Your implementation goes here
+    printf("create_resource_plan\n");
+  }
+
+  void get_resource_plan(WMGetResourcePlanResponse& _return, const WMGetResourcePlanRequest& request) {
+    // Your implementation goes here
+    printf("get_resource_plan\n");
+  }
+
+  void get_active_resource_plan(WMGetActiveResourcePlanResponse& _return, const WMGetActiveResourcePlanRequest& request) {
+    // Your implementation goes here
+    printf("get_active_resource_plan\n");
+  }
+
+  void get_all_resource_plans(WMGetAllResourcePlanResponse& _return, const WMGetAllResourcePlanRequest& request) {
+    // Your implementation goes here
+    printf("get_all_resource_plans\n");
+  }
+
+  void alter_resource_plan(WMAlterResourcePlanResponse& _return, const WMAlterResourcePlanRequest& request) {
+    // Your implementation goes here
+    printf("alter_resource_plan\n");
+  }
+
+  void validate_resource_plan(WMValidateResourcePlanResponse& _return, const WMValidateResourcePlanRequest& request) {
+    // Your implementation goes here
+    printf("validate_resource_plan\n");
+  }
+
+  void drop_resource_plan(WMDropResourcePlanResponse& _return, const WMDropResourcePlanRequest& request) {
+    // Your implementation goes here
+    printf("drop_resource_plan\n");
+  }
+
+  void create_wm_trigger(WMCreateTriggerResponse& _return, const WMCreateTriggerRequest& request) {
+    // Your implementation goes here
+    printf("create_wm_trigger\n");
+  }
+
+  void alter_wm_trigger(WMAlterTriggerResponse& _return, const WMAlterTriggerRequest& request) {
+    // Your implementation goes here
+    printf("alter_wm_trigger\n");
+  }
+
+  void drop_wm_trigger(WMDropTriggerResponse& _return, const WMDropTriggerRequest& request) {
+    // Your implementation goes here
+    printf("drop_wm_trigger\n");
+  }
+
+  void get_triggers_for_resourceplan(WMGetTriggersForResourePlanResponse& _return, const WMGetTriggersForResourePlanRequest& request) {
+    // Your implementation goes here
+    printf("get_triggers_for_resourceplan\n");
+  }
+
+  void create_wm_pool(WMCreatePoolResponse& _return, const WMCreatePoolRequest& request) {
+    // Your implementation goes here
+    printf("create_wm_pool\n");
+  }
+
+  void alter_wm_pool(WMAlterPoolResponse& _return, const WMAlterPoolRequest& request) {
+    // Your implementation goes here
+    printf("alter_wm_pool\n");
+  }
+
+  void drop_wm_pool(WMDropPoolResponse& _return, const WMDropPoolRequest& request) {
+    // Your implementation goes here
+    printf("drop_wm_pool\n");
+  }
+
+  void create_or_update_wm_mapping(WMCreateOrUpdateMappingResponse& _return, const WMCreateOrUpdateMappingRequest& request) {
+    // Your implementation goes here
+    printf("create_or_update_wm_mapping\n");
+  }
+
+  void drop_wm_mapping(WMDropMappingResponse& _return, const WMDropMappingRequest& request) {
+    // Your implementation goes here
+    printf("drop_wm_mapping\n");
+  }
+
+  void create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingResponse& _return, const WMCreateOrDropTriggerToPoolMappingRequest& request) {
+    // Your implementation goes here
+    printf("create_or_drop_wm_trigger_to_pool_mapping\n");
+  }
+
+  void create_ischema(const ISchema& schema) {
+    // Your implementation goes here
+    printf("create_ischema\n");
+  }
+
+  void alter_ischema(const AlterISchemaRequest& rqst) {
+    // Your implementation goes here
+    printf("alter_ischema\n");
+  }
+
+  void get_ischema(ISchema& _return, const ISchemaName& name) {
+    // Your implementation goes here
+    printf("get_ischema\n");
+  }
+
+  void drop_ischema(const ISchemaName& name) {
+    // Your implementation goes here
+    printf("drop_ischema\n");
+  }
+
+  void add_schema_version(const SchemaVersion& schemaVersion) {
+    // Your implementation goes here
+    printf("add_schema_version\n");
+  }
+
+  void get_schema_version(SchemaVersion& _return, const SchemaVersionDescriptor& schemaVersion) {
+    // Your implementation goes here
+    printf("get_schema_version\n");
+  }
+
+  void get_schema_latest_version(SchemaVersion& _return, const ISchemaName& schemaName) {
+    // Your implementation goes here
+    printf("get_schema_latest_version\n");
+  }
+
+  void get_schema_all_versions(std::vector<SchemaVersion> & _return, const ISchemaName& schemaName) {
+    // Your implementation goes here
+    printf("get_schema_all_versions\n");
+  }
+
+  void drop_schema_version(const SchemaVersionDescriptor& schemaVersion) {
+    // Your implementation goes here
+    printf("drop_schema_version\n");
+  }
+
+  void get_schemas_by_cols(FindSchemasByColsResp& _return, const FindSchemasByColsRqst& rqst) {
+    // Your implementation goes here
+    printf("get_schemas_by_cols\n");
+  }
+
+  void map_schema_version_to_serde(const MapSchemaVersionToSerdeRequest& rqst) {
+    // Your implementation goes here
+    printf("map_schema_version_to_serde\n");
+  }
+
+  void set_schema_version_state(const SetSchemaVersionStateRequest& rqst) {
+    // Your implementation goes here
+    printf("set_schema_version_state\n");
+  }
+
+  void add_serde(const SerDeInfo& serde) {
+    // Your implementation goes here
+    printf("add_serde\n");
+  }
+
+  void get_serde(SerDeInfo& _return, const GetSerdeRequest& rqst) {
+    // Your implementation goes here
+    printf("get_serde\n");
+  }
+
+  void get_lock_materialization_rebuild(LockResponse& _return, const std::string& dbName, const std::string& tableName, const int64_t txnId) {
+    // Your implementation goes here
+    printf("get_lock_materialization_rebuild\n");
+  }
+
+  bool heartbeat_lock_materialization_rebuild(const std::string& dbName, const std::string& tableName, const int64_t txnId) {
+    // Your implementation goes here
+    printf("heartbeat_lock_materialization_rebuild\n");
+  }
+
+  void add_runtime_stats(const RuntimeStat& stat) {
+    // Your implementation goes here
+    printf("add_runtime_stats\n");
+  }
+
+  void get_runtime_stats(std::vector<RuntimeStat> & _return, const GetRuntimeStatsRequest& rqst) {
+    // Your implementation goes here
+    printf("get_runtime_stats\n");
+  }
+
+};
+
+int main(int argc, char **argv) {
+  int port = 9090;
+  shared_ptr<ThriftHiveMetastoreHandler> handler(new ThriftHiveMetastoreHandler());
+  shared_ptr<TProcessor> processor(new ThriftHiveMetastoreProcessor(handler));
+  shared_ptr<TServerTransport> serverTransport(new TServerSocket(port));
+  shared_ptr<TTransportFactory> transportFactory(new TBufferedTransportFactory());
+  shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());
+
+  TSimpleServer server(processor, serverTransport, transportFactory, protocolFactory);
+  server.serve();
+  return 0;
+}
+
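(The skeleton above wires ThriftHiveMetastoreProcessor into a single-threaded
TSimpleServer on port 9090, with buffered transports and the binary protocol. A
minimal Java client sketch against such a server follows; it is illustrative rather
than part of the commit, assumes a server listening on localhost:9090, and mirrors
the server's transport and protocol choices. The ThriftHiveMetastore.Client class is
generated from the same IDL by the Java bindings elsewhere in this commit.)

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TBufferedTransport;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;

    public class SkeletonClientDemo {
      public static void main(String[] args) throws Exception {
        TTransport transport = new TBufferedTransport(new TSocket("localhost", 9090));
        transport.open();  // connect to the C++ skeleton server
        ThriftHiveMetastore.Client client =
            new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
        // This reaches the get_all_databases stub, which currently only
        // printf()s its name; the stub bodies are where real logic goes.
        List<String> dbs = client.get_all_databases();
        System.out.println(dbs);
        transport.close();
      }
    }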

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
new file mode 100644
index 0000000..1c1b3ce
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.cpp
@@ -0,0 +1,67 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#include "hive_metastore_constants.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+const hive_metastoreConstants g_hive_metastore_constants;
+
+hive_metastoreConstants::hive_metastoreConstants() {
+  DDL_TIME = "transient_lastDdlTime";
+
+  HIVE_FILTER_FIELD_OWNER = "hive_filter_field_owner__";
+
+  HIVE_FILTER_FIELD_PARAMS = "hive_filter_field_params__";
+
+  HIVE_FILTER_FIELD_LAST_ACCESS = "hive_filter_field_last_access__";
+
+  IS_ARCHIVED = "is_archived";
+
+  ORIGINAL_LOCATION = "original_location";
+
+  IS_IMMUTABLE = "immutable";
+
+  META_TABLE_COLUMNS = "columns";
+
+  META_TABLE_COLUMN_TYPES = "columns.types";
+
+  BUCKET_FIELD_NAME = "bucket_field_name";
+
+  BUCKET_COUNT = "bucket_count";
+
+  FIELD_TO_DIMENSION = "field_to_dimension";
+
+  META_TABLE_NAME = "name";
+
+  META_TABLE_DB = "db";
+
+  META_TABLE_LOCATION = "location";
+
+  META_TABLE_SERDE = "serde";
+
+  META_TABLE_PARTITION_COLUMNS = "partition_columns";
+
+  META_TABLE_PARTITION_COLUMN_TYPES = "partition_columns.types";
+
+  FILE_INPUT_FORMAT = "file.inputformat";
+
+  FILE_OUTPUT_FORMAT = "file.outputformat";
+
+  META_TABLE_STORAGE = "storage_handler";
+
+  TABLE_IS_TRANSACTIONAL = "transactional";
+
+  TABLE_NO_AUTO_COMPACT = "no_auto_compaction";
+
+  TABLE_TRANSACTIONAL_PROPERTIES = "transactional_properties";
+
+  TABLE_BUCKETING_VERSION = "bucketing_version";
+
+}
+
+}}} // namespace
+

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.h
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.h b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.h
new file mode 100644
index 0000000..1f06253
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_constants.h
@@ -0,0 +1,49 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#ifndef hive_metastore_CONSTANTS_H
+#define hive_metastore_CONSTANTS_H
+
+#include "hive_metastore_types.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+class hive_metastoreConstants {
+ public:
+  hive_metastoreConstants();
+
+  std::string DDL_TIME;
+  std::string HIVE_FILTER_FIELD_OWNER;
+  std::string HIVE_FILTER_FIELD_PARAMS;
+  std::string HIVE_FILTER_FIELD_LAST_ACCESS;
+  std::string IS_ARCHIVED;
+  std::string ORIGINAL_LOCATION;
+  std::string IS_IMMUTABLE;
+  std::string META_TABLE_COLUMNS;
+  std::string META_TABLE_COLUMN_TYPES;
+  std::string BUCKET_FIELD_NAME;
+  std::string BUCKET_COUNT;
+  std::string FIELD_TO_DIMENSION;
+  std::string META_TABLE_NAME;
+  std::string META_TABLE_DB;
+  std::string META_TABLE_LOCATION;
+  std::string META_TABLE_SERDE;
+  std::string META_TABLE_PARTITION_COLUMNS;
+  std::string META_TABLE_PARTITION_COLUMN_TYPES;
+  std::string FILE_INPUT_FORMAT;
+  std::string FILE_OUTPUT_FORMAT;
+  std::string META_TABLE_STORAGE;
+  std::string TABLE_IS_TRANSACTIONAL;
+  std::string TABLE_NO_AUTO_COMPACT;
+  std::string TABLE_TRANSACTIONAL_PROPERTIES;
+  std::string TABLE_BUCKETING_VERSION;
+};
+
+extern const hive_metastoreConstants g_hive_metastore_constants;
+
+}}} // namespace
+
+#endif
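(These generated constants are the C++ mirror of the table and partition parameter
keys; the same IDL produces a Java hive_metastoreConstants class in
org.apache.hadoop.hive.metastore.api with matching static String fields. A short
illustrative Java sketch of how the keys are typically consulted, assuming an
existing Table bean; it is not part of the commit.)

    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;

    final class TableProps {
      // True when the table's parameter map carries transactional=true,
      // i.e. the TABLE_IS_TRANSACTIONAL key defined above maps to "true".
      static boolean isTransactional(Table t) {
        if (t.getParameters() == null) {
          return false;
        }
        return Boolean.parseBoolean(
            t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL));
      }
    }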


[37/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
new file mode 100644
index 0000000..4cd04f1
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapabilities.java
@@ -0,0 +1,441 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ClientCapabilities implements org.apache.thrift.TBase<ClientCapabilities, ClientCapabilities._Fields>, java.io.Serializable, Cloneable, Comparable<ClientCapabilities> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClientCapabilities");
+
+  private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ClientCapabilitiesStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ClientCapabilitiesTupleSchemeFactory());
+  }
+
+  private List<ClientCapability> values; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    VALUES((short)1, "values");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // VALUES
+          return VALUES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ClientCapability.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ClientCapabilities.class, metaDataMap);
+  }
+
+  public ClientCapabilities() {
+  }
+
+  public ClientCapabilities(
+    List<ClientCapability> values)
+  {
+    this();
+    this.values = values;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ClientCapabilities(ClientCapabilities other) {
+    if (other.isSetValues()) {
+      List<ClientCapability> __this__values = new ArrayList<ClientCapability>(other.values.size());
+      for (ClientCapability other_element : other.values) {
+        __this__values.add(other_element);
+      }
+      this.values = __this__values;
+    }
+  }
+
+  public ClientCapabilities deepCopy() {
+    return new ClientCapabilities(this);
+  }
+
+  @Override
+  public void clear() {
+    this.values = null;
+  }
+
+  public int getValuesSize() {
+    return (this.values == null) ? 0 : this.values.size();
+  }
+
+  public java.util.Iterator<ClientCapability> getValuesIterator() {
+    return (this.values == null) ? null : this.values.iterator();
+  }
+
+  public void addToValues(ClientCapability elem) {
+    if (this.values == null) {
+      this.values = new ArrayList<ClientCapability>();
+    }
+    this.values.add(elem);
+  }
+
+  public List<ClientCapability> getValues() {
+    return this.values;
+  }
+
+  public void setValues(List<ClientCapability> values) {
+    this.values = values;
+  }
+
+  public void unsetValues() {
+    this.values = null;
+  }
+
+  /** Returns true if field values is set (has been assigned a value) and false otherwise */
+  public boolean isSetValues() {
+    return this.values != null;
+  }
+
+  public void setValuesIsSet(boolean value) {
+    if (!value) {
+      this.values = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case VALUES:
+      if (value == null) {
+        unsetValues();
+      } else {
+        setValues((List<ClientCapability>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case VALUES:
+      return getValues();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case VALUES:
+      return isSetValues();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ClientCapabilities)
+      return this.equals((ClientCapabilities)that);
+    return false;
+  }
+
+  public boolean equals(ClientCapabilities that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_values = true && this.isSetValues();
+    boolean that_present_values = true && that.isSetValues();
+    if (this_present_values || that_present_values) {
+      if (!(this_present_values && that_present_values))
+        return false;
+      if (!this.values.equals(that.values))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_values = true && (isSetValues());
+    list.add(present_values);
+    if (present_values)
+      list.add(values);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ClientCapabilities other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValues()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ClientCapabilities(");
+    boolean first = true;
+
+    sb.append("values:");
+    if (this.values == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.values);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetValues()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'values' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ClientCapabilitiesStandardSchemeFactory implements SchemeFactory {
+    public ClientCapabilitiesStandardScheme getScheme() {
+      return new ClientCapabilitiesStandardScheme();
+    }
+  }
+
+  private static class ClientCapabilitiesStandardScheme extends StandardScheme<ClientCapabilities> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ClientCapabilities struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // VALUES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list840 = iprot.readListBegin();
+                struct.values = new ArrayList<ClientCapability>(_list840.size);
+                ClientCapability _elem841;
+                for (int _i842 = 0; _i842 < _list840.size; ++_i842)
+                {
+                  _elem841 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
+                  struct.values.add(_elem841);
+                }
+                iprot.readListEnd();
+              }
+              struct.setValuesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ClientCapabilities struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.values != null) {
+        oprot.writeFieldBegin(VALUES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, struct.values.size()));
+          for (ClientCapability _iter843 : struct.values)
+          {
+            oprot.writeI32(_iter843.getValue());
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ClientCapabilitiesTupleSchemeFactory implements SchemeFactory {
+    public ClientCapabilitiesTupleScheme getScheme() {
+      return new ClientCapabilitiesTupleScheme();
+    }
+  }
+
+  private static class ClientCapabilitiesTupleScheme extends TupleScheme<ClientCapabilities> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.values.size());
+        for (ClientCapability _iter844 : struct.values)
+        {
+          oprot.writeI32(_iter844.getValue());
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ClientCapabilities struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list845 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32());
+        struct.values = new ArrayList<ClientCapability>(_list845.size);
+        ClientCapability _elem846;
+        for (int _i847 = 0; _i847 < _list845.size; ++_i847)
+        {
+          _elem846 = org.apache.hadoop.hive.metastore.api.ClientCapability.findByValue(iprot.readI32());
+          struct.values.add(_elem846);
+        }
+      }
+      struct.setValuesIsSet(true);
+    }
+  }
+
+}
+
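For orientation, a minimal sketch of how a caller might populate this generated struct; everything used below is defined in the class above, and validate() enforces the REQUIRED 'values' field:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.ClientCapabilities;
    import org.apache.hadoop.hive.metastore.api.ClientCapability;

    public class ClientCapabilitiesExample {
      public static void main(String[] args) throws Exception {
        // 'values' is REQUIRED: validate() throws TProtocolException if it is unset.
        ClientCapabilities caps =
            new ClientCapabilities(Arrays.asList(ClientCapability.INSERT_ONLY_TABLES));
        caps.validate();
        // Prints something like: ClientCapabilities(values:[INSERT_ONLY_TABLES])
        System.out.println(caps);
      }
    }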

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
new file mode 100644
index 0000000..8fc8311
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClientCapability.java
@@ -0,0 +1,45 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum ClientCapability implements org.apache.thrift.TEnum {
+  TEST_CAPABILITY(1),
+  INSERT_ONLY_TABLES(2);
+
+  private final int value;
+
+  private ClientCapability(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static ClientCapability findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return TEST_CAPABILITY;
+      case 2:
+        return INSERT_ONLY_TABLES;
+      default:
+        return null;
+    }
+  }
+}
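As a quick usage sketch (all methods used are defined in the enum above), getValue() and findByValue() round-trip between the enum and its Thrift wire integer, with unknown ids mapping to null rather than throwing:

    import org.apache.hadoop.hive.metastore.api.ClientCapability;

    public class ClientCapabilityExample {
      public static void main(String[] args) {
        ClientCapability cap = ClientCapability.findByValue(2);
        System.out.println(cap);                              // INSERT_ONLY_TABLES
        System.out.println(cap.getValue());                   // 2
        System.out.println(ClientCapability.findByValue(99)); // null: unknown wire value
      }
    }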

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CmRecycleRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CmRecycleRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CmRecycleRequest.java
new file mode 100644
index 0000000..77d8876
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CmRecycleRequest.java
@@ -0,0 +1,488 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CmRecycleRequest implements org.apache.thrift.TBase<CmRecycleRequest, CmRecycleRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CmRecycleRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CmRecycleRequest");
+
+  private static final org.apache.thrift.protocol.TField DATA_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("dataPath", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField PURGE_FIELD_DESC = new org.apache.thrift.protocol.TField("purge", org.apache.thrift.protocol.TType.BOOL, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CmRecycleRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CmRecycleRequestTupleSchemeFactory());
+  }
+
+  private String dataPath; // required
+  private boolean purge; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DATA_PATH((short)1, "dataPath"),
+    PURGE((short)2, "purge");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DATA_PATH
+          return DATA_PATH;
+        case 2: // PURGE
+          return PURGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __PURGE_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DATA_PATH, new org.apache.thrift.meta_data.FieldMetaData("dataPath", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PURGE, new org.apache.thrift.meta_data.FieldMetaData("purge", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CmRecycleRequest.class, metaDataMap);
+  }
+
+  public CmRecycleRequest() {
+  }
+
+  public CmRecycleRequest(
+    String dataPath,
+    boolean purge)
+  {
+    this();
+    this.dataPath = dataPath;
+    this.purge = purge;
+    setPurgeIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CmRecycleRequest(CmRecycleRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetDataPath()) {
+      this.dataPath = other.dataPath;
+    }
+    this.purge = other.purge;
+  }
+
+  public CmRecycleRequest deepCopy() {
+    return new CmRecycleRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dataPath = null;
+    setPurgeIsSet(false);
+    this.purge = false;
+  }
+
+  public String getDataPath() {
+    return this.dataPath;
+  }
+
+  public void setDataPath(String dataPath) {
+    this.dataPath = dataPath;
+  }
+
+  public void unsetDataPath() {
+    this.dataPath = null;
+  }
+
+  /** Returns true if field dataPath is set (has been assigned a value) and false otherwise */
+  public boolean isSetDataPath() {
+    return this.dataPath != null;
+  }
+
+  public void setDataPathIsSet(boolean value) {
+    if (!value) {
+      this.dataPath = null;
+    }
+  }
+
+  public boolean isPurge() {
+    return this.purge;
+  }
+
+  public void setPurge(boolean purge) {
+    this.purge = purge;
+    setPurgeIsSet(true);
+  }
+
+  public void unsetPurge() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PURGE_ISSET_ID);
+  }
+
+  /** Returns true if field purge is set (has been assigned a value) and false otherwise */
+  public boolean isSetPurge() {
+    return EncodingUtils.testBit(__isset_bitfield, __PURGE_ISSET_ID);
+  }
+
+  public void setPurgeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PURGE_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DATA_PATH:
+      if (value == null) {
+        unsetDataPath();
+      } else {
+        setDataPath((String)value);
+      }
+      break;
+
+    case PURGE:
+      if (value == null) {
+        unsetPurge();
+      } else {
+        setPurge((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DATA_PATH:
+      return getDataPath();
+
+    case PURGE:
+      return isPurge();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DATA_PATH:
+      return isSetDataPath();
+    case PURGE:
+      return isSetPurge();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CmRecycleRequest)
+      return this.equals((CmRecycleRequest)that);
+    return false;
+  }
+
+  public boolean equals(CmRecycleRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dataPath = true && this.isSetDataPath();
+    boolean that_present_dataPath = true && that.isSetDataPath();
+    if (this_present_dataPath || that_present_dataPath) {
+      if (!(this_present_dataPath && that_present_dataPath))
+        return false;
+      if (!this.dataPath.equals(that.dataPath))
+        return false;
+    }
+
+    boolean this_present_purge = true;
+    boolean that_present_purge = true;
+    if (this_present_purge || that_present_purge) {
+      if (!(this_present_purge && that_present_purge))
+        return false;
+      if (this.purge != that.purge)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dataPath = true && (isSetDataPath());
+    list.add(present_dataPath);
+    if (present_dataPath)
+      list.add(dataPath);
+
+    boolean present_purge = true;
+    list.add(present_purge);
+    if (present_purge)
+      list.add(purge);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CmRecycleRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDataPath()).compareTo(other.isSetDataPath());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDataPath()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dataPath, other.dataPath);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPurge()).compareTo(other.isSetPurge());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPurge()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.purge, other.purge);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CmRecycleRequest(");
+    boolean first = true;
+
+    sb.append("dataPath:");
+    if (this.dataPath == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dataPath);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("purge:");
+    sb.append(this.purge);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDataPath()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dataPath' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPurge()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'purge' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CmRecycleRequestStandardSchemeFactory implements SchemeFactory {
+    public CmRecycleRequestStandardScheme getScheme() {
+      return new CmRecycleRequestStandardScheme();
+    }
+  }
+
+  private static class CmRecycleRequestStandardScheme extends StandardScheme<CmRecycleRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CmRecycleRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DATA_PATH
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dataPath = iprot.readString();
+              struct.setDataPathIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PURGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.purge = iprot.readBool();
+              struct.setPurgeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CmRecycleRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dataPath != null) {
+        oprot.writeFieldBegin(DATA_PATH_FIELD_DESC);
+        oprot.writeString(struct.dataPath);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(PURGE_FIELD_DESC);
+      oprot.writeBool(struct.purge);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CmRecycleRequestTupleSchemeFactory implements SchemeFactory {
+    public CmRecycleRequestTupleScheme getScheme() {
+      return new CmRecycleRequestTupleScheme();
+    }
+  }
+
+  private static class CmRecycleRequestTupleScheme extends TupleScheme<CmRecycleRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CmRecycleRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dataPath);
+      oprot.writeBool(struct.purge);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CmRecycleRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dataPath = iprot.readString();
+      struct.setDataPathIsSet(true);
+      struct.purge = iprot.readBool();
+      struct.setPurgeIsSet(true);
+    }
+  }
+
+}
+
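A minimal sketch of constructing the request (the path below is a made-up example); note that presence of the primitive 'purge' field is tracked through __isset_bitfield rather than nullability:

    import org.apache.hadoop.hive.metastore.api.CmRecycleRequest;

    public class CmRecycleRequestExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical data path; both fields are REQUIRED.
        CmRecycleRequest req = new CmRecycleRequest("/warehouse/tbl1", true);
        req.validate();                       // throws if dataPath or purge is unset
        System.out.println(req.isSetPurge()); // true: set via the isset bitfield, not a null check
      }
    }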

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CmRecycleResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CmRecycleResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CmRecycleResponse.java
new file mode 100644
index 0000000..15ea318
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CmRecycleResponse.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CmRecycleResponse implements org.apache.thrift.TBase<CmRecycleResponse, CmRecycleResponse._Fields>, java.io.Serializable, Cloneable, Comparable<CmRecycleResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CmRecycleResponse");
+
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CmRecycleResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CmRecycleResponseTupleSchemeFactory());
+  }
+
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CmRecycleResponse.class, metaDataMap);
+  }
+
+  public CmRecycleResponse() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CmRecycleResponse(CmRecycleResponse other) {
+  }
+
+  public CmRecycleResponse deepCopy() {
+    return new CmRecycleResponse(this);
+  }
+
+  @Override
+  public void clear() {
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CmRecycleResponse)
+      return this.equals((CmRecycleResponse)that);
+    return false;
+  }
+
+  public boolean equals(CmRecycleResponse that) {
+    if (that == null)
+      return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CmRecycleResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CmRecycleResponse(");
+    boolean first = true;
+
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CmRecycleResponseStandardSchemeFactory implements SchemeFactory {
+    public CmRecycleResponseStandardScheme getScheme() {
+      return new CmRecycleResponseStandardScheme();
+    }
+  }
+
+  private static class CmRecycleResponseStandardScheme extends StandardScheme<CmRecycleResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CmRecycleResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CmRecycleResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CmRecycleResponseTupleSchemeFactory implements SchemeFactory {
+    public CmRecycleResponseTupleScheme getScheme() {
+      return new CmRecycleResponseTupleScheme();
+    }
+  }
+
+  private static class CmRecycleResponseTupleScheme extends TupleScheme<CmRecycleResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CmRecycleResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CmRecycleResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+    }
+  }
+
+}
+
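Since this struct carries no fields, every instance compares equal and a serialization round-trip is trivial; a sketch using the standard libthrift 0.9.x TSerializer/TDeserializer helpers:

    import org.apache.hadoop.hive.metastore.api.CmRecycleResponse;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class CmRecycleResponseExample {
      public static void main(String[] args) throws Exception {
        byte[] wire = new TSerializer(new TCompactProtocol.Factory())
            .serialize(new CmRecycleResponse());
        CmRecycleResponse copy = new CmRecycleResponse();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);
        System.out.println(copy.equals(new CmRecycleResponse())); // true: no fields to compare
      }
    }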

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
new file mode 100644
index 0000000..6ce7214
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java
@@ -0,0 +1,549 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ColumnStatistics implements org.apache.thrift.TBase<ColumnStatistics, ColumnStatistics._Fields>, java.io.Serializable, Cloneable, Comparable<ColumnStatistics> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatistics");
+
+  private static final org.apache.thrift.protocol.TField STATS_DESC_FIELD_DESC = new org.apache.thrift.protocol.TField("statsDesc", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField STATS_OBJ_FIELD_DESC = new org.apache.thrift.protocol.TField("statsObj", org.apache.thrift.protocol.TType.LIST, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ColumnStatisticsStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ColumnStatisticsTupleSchemeFactory());
+  }
+
+  private ColumnStatisticsDesc statsDesc; // required
+  private List<ColumnStatisticsObj> statsObj; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    STATS_DESC((short)1, "statsDesc"),
+    STATS_OBJ((short)2, "statsObj");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // STATS_DESC
+          return STATS_DESC;
+        case 2: // STATS_OBJ
+          return STATS_OBJ;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.STATS_DESC, new org.apache.thrift.meta_data.FieldMetaData("statsDesc", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsDesc.class)));
+    tmpMap.put(_Fields.STATS_OBJ, new org.apache.thrift.meta_data.FieldMetaData("statsObj", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatistics.class, metaDataMap);
+  }
+
+  public ColumnStatistics() {
+  }
+
+  public ColumnStatistics(
+    ColumnStatisticsDesc statsDesc,
+    List<ColumnStatisticsObj> statsObj)
+  {
+    this();
+    this.statsDesc = statsDesc;
+    this.statsObj = statsObj;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ColumnStatistics(ColumnStatistics other) {
+    if (other.isSetStatsDesc()) {
+      this.statsDesc = new ColumnStatisticsDesc(other.statsDesc);
+    }
+    if (other.isSetStatsObj()) {
+      List<ColumnStatisticsObj> __this__statsObj = new ArrayList<ColumnStatisticsObj>(other.statsObj.size());
+      for (ColumnStatisticsObj other_element : other.statsObj) {
+        __this__statsObj.add(new ColumnStatisticsObj(other_element));
+      }
+      this.statsObj = __this__statsObj;
+    }
+  }
+
+  public ColumnStatistics deepCopy() {
+    return new ColumnStatistics(this);
+  }
+
+  @Override
+  public void clear() {
+    this.statsDesc = null;
+    this.statsObj = null;
+  }
+
+  public ColumnStatisticsDesc getStatsDesc() {
+    return this.statsDesc;
+  }
+
+  public void setStatsDesc(ColumnStatisticsDesc statsDesc) {
+    this.statsDesc = statsDesc;
+  }
+
+  public void unsetStatsDesc() {
+    this.statsDesc = null;
+  }
+
+  /** Returns true if field statsDesc is set (has been assigned a value) and false otherwise */
+  public boolean isSetStatsDesc() {
+    return this.statsDesc != null;
+  }
+
+  public void setStatsDescIsSet(boolean value) {
+    if (!value) {
+      this.statsDesc = null;
+    }
+  }
+
+  public int getStatsObjSize() {
+    return (this.statsObj == null) ? 0 : this.statsObj.size();
+  }
+
+  public java.util.Iterator<ColumnStatisticsObj> getStatsObjIterator() {
+    return (this.statsObj == null) ? null : this.statsObj.iterator();
+  }
+
+  public void addToStatsObj(ColumnStatisticsObj elem) {
+    if (this.statsObj == null) {
+      this.statsObj = new ArrayList<ColumnStatisticsObj>();
+    }
+    this.statsObj.add(elem);
+  }
+
+  public List<ColumnStatisticsObj> getStatsObj() {
+    return this.statsObj;
+  }
+
+  public void setStatsObj(List<ColumnStatisticsObj> statsObj) {
+    this.statsObj = statsObj;
+  }
+
+  public void unsetStatsObj() {
+    this.statsObj = null;
+  }
+
+  /** Returns true if field statsObj is set (has been assigned a value) and false otherwise */
+  public boolean isSetStatsObj() {
+    return this.statsObj != null;
+  }
+
+  public void setStatsObjIsSet(boolean value) {
+    if (!value) {
+      this.statsObj = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case STATS_DESC:
+      if (value == null) {
+        unsetStatsDesc();
+      } else {
+        setStatsDesc((ColumnStatisticsDesc)value);
+      }
+      break;
+
+    case STATS_OBJ:
+      if (value == null) {
+        unsetStatsObj();
+      } else {
+        setStatsObj((List<ColumnStatisticsObj>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case STATS_DESC:
+      return getStatsDesc();
+
+    case STATS_OBJ:
+      return getStatsObj();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case STATS_DESC:
+      return isSetStatsDesc();
+    case STATS_OBJ:
+      return isSetStatsObj();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ColumnStatistics)
+      return this.equals((ColumnStatistics)that);
+    return false;
+  }
+
+  public boolean equals(ColumnStatistics that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_statsDesc = true && this.isSetStatsDesc();
+    boolean that_present_statsDesc = true && that.isSetStatsDesc();
+    if (this_present_statsDesc || that_present_statsDesc) {
+      if (!(this_present_statsDesc && that_present_statsDesc))
+        return false;
+      if (!this.statsDesc.equals(that.statsDesc))
+        return false;
+    }
+
+    boolean this_present_statsObj = true && this.isSetStatsObj();
+    boolean that_present_statsObj = true && that.isSetStatsObj();
+    if (this_present_statsObj || that_present_statsObj) {
+      if (!(this_present_statsObj && that_present_statsObj))
+        return false;
+      if (!this.statsObj.equals(that.statsObj))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_statsDesc = true && (isSetStatsDesc());
+    list.add(present_statsDesc);
+    if (present_statsDesc)
+      list.add(statsDesc);
+
+    boolean present_statsObj = true && (isSetStatsObj());
+    list.add(present_statsObj);
+    if (present_statsObj)
+      list.add(statsObj);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ColumnStatistics other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetStatsDesc()).compareTo(other.isSetStatsDesc());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetStatsDesc()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.statsDesc, other.statsDesc);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetStatsObj()).compareTo(other.isSetStatsObj());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetStatsObj()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.statsObj, other.statsObj);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ColumnStatistics(");
+    boolean first = true;
+
+    sb.append("statsDesc:");
+    if (this.statsDesc == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.statsDesc);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("statsObj:");
+    if (this.statsObj == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.statsObj);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetStatsDesc()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'statsDesc' is unset! Struct:" + toString());
+    }
+
+    if (!isSetStatsObj()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'statsObj' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (statsDesc != null) {
+      statsDesc.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ColumnStatisticsStandardSchemeFactory implements SchemeFactory {
+    public ColumnStatisticsStandardScheme getScheme() {
+      return new ColumnStatisticsStandardScheme();
+    }
+  }
+
+  private static class ColumnStatisticsStandardScheme extends StandardScheme<ColumnStatistics> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatistics struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // STATS_DESC
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.statsDesc = new ColumnStatisticsDesc();
+              struct.statsDesc.read(iprot);
+              struct.setStatsDescIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // STATS_OBJ
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list268 = iprot.readListBegin();
+                struct.statsObj = new ArrayList<ColumnStatisticsObj>(_list268.size);
+                ColumnStatisticsObj _elem269;
+                for (int _i270 = 0; _i270 < _list268.size; ++_i270)
+                {
+                  _elem269 = new ColumnStatisticsObj();
+                  _elem269.read(iprot);
+                  struct.statsObj.add(_elem269);
+                }
+                iprot.readListEnd();
+              }
+              struct.setStatsObjIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatistics struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.statsDesc != null) {
+        oprot.writeFieldBegin(STATS_DESC_FIELD_DESC);
+        struct.statsDesc.write(oprot);
+        oprot.writeFieldEnd();
+      }
+      if (struct.statsObj != null) {
+        oprot.writeFieldBegin(STATS_OBJ_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.statsObj.size()));
+          for (ColumnStatisticsObj _iter271 : struct.statsObj)
+          {
+            _iter271.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ColumnStatisticsTupleSchemeFactory implements SchemeFactory {
+    public ColumnStatisticsTupleScheme getScheme() {
+      return new ColumnStatisticsTupleScheme();
+    }
+  }
+
+  private static class ColumnStatisticsTupleScheme extends TupleScheme<ColumnStatistics> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      struct.statsDesc.write(oprot);
+      {
+        oprot.writeI32(struct.statsObj.size());
+        for (ColumnStatisticsObj _iter272 : struct.statsObj)
+        {
+          _iter272.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.statsDesc = new ColumnStatisticsDesc();
+      struct.statsDesc.read(iprot);
+      struct.setStatsDescIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list273 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.statsObj = new ArrayList<ColumnStatisticsObj>(_list273.size);
+        ColumnStatisticsObj _elem274;
+        for (int _i275 = 0; _i275 < _list273.size; ++_i275)
+        {
+          _elem274 = new ColumnStatisticsObj();
+          _elem274.read(iprot);
+          struct.statsObj.add(_elem274);
+        }
+      }
+      struct.setStatsObjIsSet(true);
+    }
+  }
+
+}
+
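A sketch of assembling a ColumnStatistics payload; ColumnStatisticsDesc, ColumnStatisticsObj, LongColumnStatsData, and the ColumnStatisticsData union factory come from sibling generated classes that are not part of this hunk, so the constructors used below are assumptions based on the usual Thrift codegen for their required fields:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.*;

    public class ColumnStatisticsExample {
      public static void main(String[] args) throws Exception {
        // Assumed ctor: (isTblLevel, dbName, tableName), the struct's REQUIRED fields.
        ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true, "default", "t1");
        // Assumed ctor: (numNulls, numDVs); the union factory wraps it as the longStats arm.
        ColumnStatisticsData data =
            ColumnStatisticsData.longStats(new LongColumnStatsData(0L, 42L));
        ColumnStatisticsObj obj = new ColumnStatisticsObj("id", "bigint", data);
        ColumnStatistics stats = new ColumnStatistics(desc, Arrays.asList(obj));
        stats.validate(); // statsDesc and statsObj are REQUIRED; statsDesc.validate() also runs
      }
    }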

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsData.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsData.java
new file mode 100644
index 0000000..9a2e4f4
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatisticsData.java
@@ -0,0 +1,675 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ColumnStatisticsData extends org.apache.thrift.TUnion<ColumnStatisticsData, ColumnStatisticsData._Fields> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ColumnStatisticsData");
+  private static final org.apache.thrift.protocol.TField BOOLEAN_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("booleanStats", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField LONG_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("longStats", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+  private static final org.apache.thrift.protocol.TField DOUBLE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("doubleStats", org.apache.thrift.protocol.TType.STRUCT, (short)3);
+  private static final org.apache.thrift.protocol.TField STRING_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("stringStats", org.apache.thrift.protocol.TType.STRUCT, (short)4);
+  private static final org.apache.thrift.protocol.TField BINARY_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("binaryStats", org.apache.thrift.protocol.TType.STRUCT, (short)5);
+  private static final org.apache.thrift.protocol.TField DECIMAL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("decimalStats", org.apache.thrift.protocol.TType.STRUCT, (short)6);
+  private static final org.apache.thrift.protocol.TField DATE_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("dateStats", org.apache.thrift.protocol.TType.STRUCT, (short)7);
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    BOOLEAN_STATS((short)1, "booleanStats"),
+    LONG_STATS((short)2, "longStats"),
+    DOUBLE_STATS((short)3, "doubleStats"),
+    STRING_STATS((short)4, "stringStats"),
+    BINARY_STATS((short)5, "binaryStats"),
+    DECIMAL_STATS((short)6, "decimalStats"),
+    DATE_STATS((short)7, "dateStats");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // BOOLEAN_STATS
+          return BOOLEAN_STATS;
+        case 2: // LONG_STATS
+          return LONG_STATS;
+        case 3: // DOUBLE_STATS
+          return DOUBLE_STATS;
+        case 4: // STRING_STATS
+          return STRING_STATS;
+        case 5: // BINARY_STATS
+          return BINARY_STATS;
+        case 6: // DECIMAL_STATS
+          return DECIMAL_STATS;
+        case 7: // DATE_STATS
+          return DATE_STATS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.BOOLEAN_STATS, new org.apache.thrift.meta_data.FieldMetaData("booleanStats", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, BooleanColumnStatsData.class)));
+    tmpMap.put(_Fields.LONG_STATS, new org.apache.thrift.meta_data.FieldMetaData("longStats", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, LongColumnStatsData.class)));
+    tmpMap.put(_Fields.DOUBLE_STATS, new org.apache.thrift.meta_data.FieldMetaData("doubleStats", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DoubleColumnStatsData.class)));
+    tmpMap.put(_Fields.STRING_STATS, new org.apache.thrift.meta_data.FieldMetaData("stringStats", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StringColumnStatsData.class)));
+    tmpMap.put(_Fields.BINARY_STATS, new org.apache.thrift.meta_data.FieldMetaData("binaryStats", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, BinaryColumnStatsData.class)));
+    tmpMap.put(_Fields.DECIMAL_STATS, new org.apache.thrift.meta_data.FieldMetaData("decimalStats", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DecimalColumnStatsData.class)));
+    tmpMap.put(_Fields.DATE_STATS, new org.apache.thrift.meta_data.FieldMetaData("dateStats", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DateColumnStatsData.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ColumnStatisticsData.class, metaDataMap);
+  }
+
+  public ColumnStatisticsData() {
+    super();
+  }
+
+  public ColumnStatisticsData(_Fields setField, Object value) {
+    super(setField, value);
+  }
+
+  public ColumnStatisticsData(ColumnStatisticsData other) {
+    super(other);
+  }
+  public ColumnStatisticsData deepCopy() {
+    return new ColumnStatisticsData(this);
+  }
+
+  public static ColumnStatisticsData booleanStats(BooleanColumnStatsData value) {
+    ColumnStatisticsData x = new ColumnStatisticsData();
+    x.setBooleanStats(value);
+    return x;
+  }
+
+  public static ColumnStatisticsData longStats(LongColumnStatsData value) {
+    ColumnStatisticsData x = new ColumnStatisticsData();
+    x.setLongStats(value);
+    return x;
+  }
+
+  public static ColumnStatisticsData doubleStats(DoubleColumnStatsData value) {
+    ColumnStatisticsData x = new ColumnStatisticsData();
+    x.setDoubleStats(value);
+    return x;
+  }
+
+  public static ColumnStatisticsData stringStats(StringColumnStatsData value) {
+    ColumnStatisticsData x = new ColumnStatisticsData();
+    x.setStringStats(value);
+    return x;
+  }
+
+  public static ColumnStatisticsData binaryStats(BinaryColumnStatsData value) {
+    ColumnStatisticsData x = new ColumnStatisticsData();
+    x.setBinaryStats(value);
+    return x;
+  }
+
+  public static ColumnStatisticsData decimalStats(DecimalColumnStatsData value) {
+    ColumnStatisticsData x = new ColumnStatisticsData();
+    x.setDecimalStats(value);
+    return x;
+  }
+
+  public static ColumnStatisticsData dateStats(DateColumnStatsData value) {
+    ColumnStatisticsData x = new ColumnStatisticsData();
+    x.setDateStats(value);
+    return x;
+  }
+
+
+  @Override
+  protected void checkType(_Fields setField, Object value) throws ClassCastException {
+    switch (setField) {
+      case BOOLEAN_STATS:
+        if (value instanceof BooleanColumnStatsData) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type BooleanColumnStatsData for field 'booleanStats', but got " + value.getClass().getSimpleName());
+      case LONG_STATS:
+        if (value instanceof LongColumnStatsData) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type LongColumnStatsData for field 'longStats', but got " + value.getClass().getSimpleName());
+      case DOUBLE_STATS:
+        if (value instanceof DoubleColumnStatsData) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type DoubleColumnStatsData for field 'doubleStats', but got " + value.getClass().getSimpleName());
+      case STRING_STATS:
+        if (value instanceof StringColumnStatsData) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type StringColumnStatsData for field 'stringStats', but got " + value.getClass().getSimpleName());
+      case BINARY_STATS:
+        if (value instanceof BinaryColumnStatsData) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type BinaryColumnStatsData for field 'binaryStats', but got " + value.getClass().getSimpleName());
+      case DECIMAL_STATS:
+        if (value instanceof DecimalColumnStatsData) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type DecimalColumnStatsData for field 'decimalStats', but got " + value.getClass().getSimpleName());
+      case DATE_STATS:
+        if (value instanceof DateColumnStatsData) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type DateColumnStatsData for field 'dateStats', but got " + value.getClass().getSimpleName());
+      default:
+        throw new IllegalArgumentException("Unknown field id " + setField);
+    }
+  }
+
+  @Override
+  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException {
+    _Fields setField = _Fields.findByThriftId(field.id);
+    if (setField != null) {
+      switch (setField) {
+        case BOOLEAN_STATS:
+          if (field.type == BOOLEAN_STATS_FIELD_DESC.type) {
+            BooleanColumnStatsData booleanStats;
+            booleanStats = new BooleanColumnStatsData();
+            booleanStats.read(iprot);
+            return booleanStats;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        case LONG_STATS:
+          if (field.type == LONG_STATS_FIELD_DESC.type) {
+            LongColumnStatsData longStats;
+            longStats = new LongColumnStatsData();
+            longStats.read(iprot);
+            return longStats;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        case DOUBLE_STATS:
+          if (field.type == DOUBLE_STATS_FIELD_DESC.type) {
+            DoubleColumnStatsData doubleStats;
+            doubleStats = new DoubleColumnStatsData();
+            doubleStats.read(iprot);
+            return doubleStats;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        case STRING_STATS:
+          if (field.type == STRING_STATS_FIELD_DESC.type) {
+            StringColumnStatsData stringStats;
+            stringStats = new StringColumnStatsData();
+            stringStats.read(iprot);
+            return stringStats;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        case BINARY_STATS:
+          if (field.type == BINARY_STATS_FIELD_DESC.type) {
+            BinaryColumnStatsData binaryStats;
+            binaryStats = new BinaryColumnStatsData();
+            binaryStats.read(iprot);
+            return binaryStats;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        case DECIMAL_STATS:
+          if (field.type == DECIMAL_STATS_FIELD_DESC.type) {
+            DecimalColumnStatsData decimalStats;
+            decimalStats = new DecimalColumnStatsData();
+            decimalStats.read(iprot);
+            return decimalStats;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        case DATE_STATS:
+          if (field.type == DATE_STATS_FIELD_DESC.type) {
+            DateColumnStatsData dateStats;
+            dateStats = new DateColumnStatsData();
+            dateStats.read(iprot);
+            return dateStats;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        default:
+          throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
+      }
+    } else {
+      org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+      return null;
+    }
+  }
+
+  @Override
+  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    switch (setField_) {
+      case BOOLEAN_STATS:
+        BooleanColumnStatsData booleanStats = (BooleanColumnStatsData)value_;
+        booleanStats.write(oprot);
+        return;
+      case LONG_STATS:
+        LongColumnStatsData longStats = (LongColumnStatsData)value_;
+        longStats.write(oprot);
+        return;
+      case DOUBLE_STATS:
+        DoubleColumnStatsData doubleStats = (DoubleColumnStatsData)value_;
+        doubleStats.write(oprot);
+        return;
+      case STRING_STATS:
+        StringColumnStatsData stringStats = (StringColumnStatsData)value_;
+        stringStats.write(oprot);
+        return;
+      case BINARY_STATS:
+        BinaryColumnStatsData binaryStats = (BinaryColumnStatsData)value_;
+        binaryStats.write(oprot);
+        return;
+      case DECIMAL_STATS:
+        DecimalColumnStatsData decimalStats = (DecimalColumnStatsData)value_;
+        decimalStats.write(oprot);
+        return;
+      case DATE_STATS:
+        DateColumnStatsData dateStats = (DateColumnStatsData)value_;
+        dateStats.write(oprot);
+        return;
+      default:
+        throw new IllegalStateException("Cannot write union with unknown field " + setField_);
+    }
+  }
+
+  @Override
+  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException {
+    _Fields setField = _Fields.findByThriftId(fieldID);
+    if (setField != null) {
+      switch (setField) {
+        case BOOLEAN_STATS:
+          BooleanColumnStatsData booleanStats;
+          booleanStats = new BooleanColumnStatsData();
+          booleanStats.read(iprot);
+          return booleanStats;
+        case LONG_STATS:
+          LongColumnStatsData longStats;
+          longStats = new LongColumnStatsData();
+          longStats.read(iprot);
+          return longStats;
+        case DOUBLE_STATS:
+          DoubleColumnStatsData doubleStats;
+          doubleStats = new DoubleColumnStatsData();
+          doubleStats.read(iprot);
+          return doubleStats;
+        case STRING_STATS:
+          StringColumnStatsData stringStats;
+          stringStats = new StringColumnStatsData();
+          stringStats.read(iprot);
+          return stringStats;
+        case BINARY_STATS:
+          BinaryColumnStatsData binaryStats;
+          binaryStats = new BinaryColumnStatsData();
+          binaryStats.read(iprot);
+          return binaryStats;
+        case DECIMAL_STATS:
+          DecimalColumnStatsData decimalStats;
+          decimalStats = new DecimalColumnStatsData();
+          decimalStats.read(iprot);
+          return decimalStats;
+        case DATE_STATS:
+          DateColumnStatsData dateStats;
+          dateStats = new DateColumnStatsData();
+          dateStats.read(iprot);
+          return dateStats;
+        default:
+          throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
+      }
+    } else {
+      throw new TProtocolException("Couldn't find a field with field id " + fieldID);
+    }
+  }
+
+  @Override
+  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    switch (setField_) {
+      case BOOLEAN_STATS:
+        BooleanColumnStatsData booleanStats = (BooleanColumnStatsData)value_;
+        booleanStats.write(oprot);
+        return;
+      case LONG_STATS:
+        LongColumnStatsData longStats = (LongColumnStatsData)value_;
+        longStats.write(oprot);
+        return;
+      case DOUBLE_STATS:
+        DoubleColumnStatsData doubleStats = (DoubleColumnStatsData)value_;
+        doubleStats.write(oprot);
+        return;
+      case STRING_STATS:
+        StringColumnStatsData stringStats = (StringColumnStatsData)value_;
+        stringStats.write(oprot);
+        return;
+      case BINARY_STATS:
+        BinaryColumnStatsData binaryStats = (BinaryColumnStatsData)value_;
+        binaryStats.write(oprot);
+        return;
+      case DECIMAL_STATS:
+        DecimalColumnStatsData decimalStats = (DecimalColumnStatsData)value_;
+        decimalStats.write(oprot);
+        return;
+      case DATE_STATS:
+        DateColumnStatsData dateStats = (DateColumnStatsData)value_;
+        dateStats.write(oprot);
+        return;
+      default:
+        throw new IllegalStateException("Cannot write union with unknown field " + setField_);
+    }
+  }
+
+  @Override
+  protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) {
+    switch (setField) {
+      case BOOLEAN_STATS:
+        return BOOLEAN_STATS_FIELD_DESC;
+      case LONG_STATS:
+        return LONG_STATS_FIELD_DESC;
+      case DOUBLE_STATS:
+        return DOUBLE_STATS_FIELD_DESC;
+      case STRING_STATS:
+        return STRING_STATS_FIELD_DESC;
+      case BINARY_STATS:
+        return BINARY_STATS_FIELD_DESC;
+      case DECIMAL_STATS:
+        return DECIMAL_STATS_FIELD_DESC;
+      case DATE_STATS:
+        return DATE_STATS_FIELD_DESC;
+      default:
+        throw new IllegalArgumentException("Unknown field id " + setField);
+    }
+  }
+
+  @Override
+  protected org.apache.thrift.protocol.TStruct getStructDesc() {
+    return STRUCT_DESC;
+  }
+
+  @Override
+  protected _Fields enumForId(short id) {
+    return _Fields.findByThriftIdOrThrow(id);
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+
+  public BooleanColumnStatsData getBooleanStats() {
+    if (getSetField() == _Fields.BOOLEAN_STATS) {
+      return (BooleanColumnStatsData)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'booleanStats' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setBooleanStats(BooleanColumnStatsData value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.BOOLEAN_STATS;
+    value_ = value;
+  }
+
+  public LongColumnStatsData getLongStats() {
+    if (getSetField() == _Fields.LONG_STATS) {
+      return (LongColumnStatsData)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'longStats' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setLongStats(LongColumnStatsData value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.LONG_STATS;
+    value_ = value;
+  }
+
+  public DoubleColumnStatsData getDoubleStats() {
+    if (getSetField() == _Fields.DOUBLE_STATS) {
+      return (DoubleColumnStatsData)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'doubleStats' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setDoubleStats(DoubleColumnStatsData value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.DOUBLE_STATS;
+    value_ = value;
+  }
+
+  public StringColumnStatsData getStringStats() {
+    if (getSetField() == _Fields.STRING_STATS) {
+      return (StringColumnStatsData)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'stringStats' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setStringStats(StringColumnStatsData value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.STRING_STATS;
+    value_ = value;
+  }
+
+  public BinaryColumnStatsData getBinaryStats() {
+    if (getSetField() == _Fields.BINARY_STATS) {
+      return (BinaryColumnStatsData)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'binaryStats' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setBinaryStats(BinaryColumnStatsData value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.BINARY_STATS;
+    value_ = value;
+  }
+
+  public DecimalColumnStatsData getDecimalStats() {
+    if (getSetField() == _Fields.DECIMAL_STATS) {
+      return (DecimalColumnStatsData)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'decimalStats' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setDecimalStats(DecimalColumnStatsData value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.DECIMAL_STATS;
+    value_ = value;
+  }
+
+  public DateColumnStatsData getDateStats() {
+    if (getSetField() == _Fields.DATE_STATS) {
+      return (DateColumnStatsData)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'dateStats' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setDateStats(DateColumnStatsData value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.DATE_STATS;
+    value_ = value;
+  }
+
+  public boolean isSetBooleanStats() {
+    return setField_ == _Fields.BOOLEAN_STATS;
+  }
+
+
+  public boolean isSetLongStats() {
+    return setField_ == _Fields.LONG_STATS;
+  }
+
+
+  public boolean isSetDoubleStats() {
+    return setField_ == _Fields.DOUBLE_STATS;
+  }
+
+
+  public boolean isSetStringStats() {
+    return setField_ == _Fields.STRING_STATS;
+  }
+
+
+  public boolean isSetBinaryStats() {
+    return setField_ == _Fields.BINARY_STATS;
+  }
+
+
+  public boolean isSetDecimalStats() {
+    return setField_ == _Fields.DECIMAL_STATS;
+  }
+
+
+  public boolean isSetDateStats() {
+    return setField_ == _Fields.DATE_STATS;
+  }
+
+
+  public boolean equals(Object other) {
+    if (other instanceof ColumnStatisticsData) {
+      return equals((ColumnStatisticsData)other);
+    } else {
+      return false;
+    }
+  }
+
+  public boolean equals(ColumnStatisticsData other) {
+    return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue());
+  }
+
+  @Override
+  public int compareTo(ColumnStatisticsData other) {
+    int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField());
+    if (lastComparison == 0) {
+      return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue());
+    }
+    return lastComparison;
+  }
+
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+    list.add(this.getClass().getName());
+    org.apache.thrift.TFieldIdEnum setField = getSetField();
+    if (setField != null) {
+      list.add(setField.getThriftFieldId());
+      Object value = getFieldValue();
+      if (value instanceof org.apache.thrift.TEnum) {
+        list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue());
+      } else {
+        list.add(value);
+      }
+    }
+    return list.hashCode();
+  }
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+
+}
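
For reviewers skimming the generated union above: a TUnion carries exactly one
branch at a time, and the static factories are shorthand for construct-then-set.
A minimal usage sketch follows, assuming the LongColumnStatsData accessors
generated from the same Thrift IDL; the ColumnStatsUnionExample class name is
invented for illustration and is not part of this commit:

  import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
  import org.apache.hadoop.hive.metastore.api.LongColumnStatsData;

  public class ColumnStatsUnionExample {
    public static void main(String[] args) {
      // Populate the long-typed branch of the union.
      LongColumnStatsData longStats = new LongColumnStatsData();
      longStats.setNumNulls(3L);
      longStats.setNumDVs(42L);

      // Equivalent to: new ColumnStatisticsData() followed by
      // setLongStats(longStats); the factory records LONG_STATS
      // as the single active field.
      ColumnStatisticsData data = ColumnStatisticsData.longStats(longStats);

      // Exactly one isSet* accessor returns true; reading any other branch,
      // e.g. getDoubleStats(), throws a RuntimeException.
      if (data.isSetLongStats()) {
        System.out.println("NDV: " + data.getLongStats().getNumDVs());
      }
    }
  }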


[62/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql
index 0000000,7cab4fb..1269070
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/derby/hive-schema-4.0.0.derby.sql
@@@ -1,0 -1,710 +1,710 @@@
+ -- Timestamp: 2011-09-22 15:32:02.024
+ -- Source database is: /home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+ -- Connection URL is: jdbc:derby:/home/carl/Work/repos/hive1/metastore/scripts/upgrade/derby/mdb
+ -- Specified schema is: APP
+ -- appendLogs: false
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for functions
+ -- ----------------------------------------------
+ 
+ CREATE FUNCTION "APP"."NUCLEUS_ASCII" (C CHAR(1)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.ascii' ;
+ 
+ CREATE FUNCTION "APP"."NUCLEUS_MATCHES" (TEXT VARCHAR(8000),PATTERN VARCHAR(8000)) RETURNS INTEGER LANGUAGE JAVA PARAMETER STYLE JAVA READS SQL DATA CALLED ON NULL INPUT EXTERNAL NAME 'org.datanucleus.store.rdbms.adapter.DerbySQLFunction.matches' ;
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for tables
+ -- ----------------------------------------------
+ CREATE TABLE "APP"."DBS" (
+   "DB_ID" BIGINT NOT NULL,
+   "DESC" VARCHAR(4000),
+   "DB_LOCATION_URI" VARCHAR(4000) NOT NULL,
+   "NAME" VARCHAR(128),
+   "OWNER_NAME" VARCHAR(128),
+   "OWNER_TYPE" VARCHAR(10),
+   "CTLG_NAME" VARCHAR(256) NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."TBL_PRIVS" ("TBL_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."DATABASE_PARAMS" ("DB_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(180) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+ 
+ CREATE TABLE "APP"."TBL_COL_PRIVS" ("TBL_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "TBL_COL_PRIV" VARCHAR(128), "TBL_ID" BIGINT, "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."SERDE_PARAMS" ("SERDE_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+ 
+ CREATE TABLE "APP"."COLUMNS_V2" ("CD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(4000), "COLUMN_NAME" VARCHAR(767) NOT NULL, "TYPE_NAME" CLOB, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."SORT_COLS" ("SD_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "ORDER" INTEGER NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."CDS" ("CD_ID" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."PARTITION_KEY_VALS" ("PART_ID" BIGINT NOT NULL, "PART_KEY_VAL" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."DB_PRIVS" ("DB_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "DB_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."IDXS" ("INDEX_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DEFERRED_REBUILD" CHAR(1) NOT NULL, "INDEX_HANDLER_CLASS" VARCHAR(4000), "INDEX_NAME" VARCHAR(128), "INDEX_TBL_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "ORIG_TBL_ID" BIGINT, "SD_ID" BIGINT);
+ 
+ CREATE TABLE "APP"."INDEX_PARAMS" ("INDEX_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+ 
 -CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT);
++CREATE TABLE "APP"."PARTITIONS" ("PART_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "LAST_ACCESS_TIME" INTEGER NOT NULL, "PART_NAME" VARCHAR(767), "SD_ID" BIGINT, "TBL_ID" BIGINT, "WRITE_ID" BIGINT DEFAULT 0);
+ 
+ CREATE TABLE "APP"."SERDES" ("SERDE_ID" BIGINT NOT NULL, "NAME" VARCHAR(128), "SLIB" VARCHAR(4000), "DESCRIPTION" VARCHAR(4000), "SERIALIZER_CLASS" VARCHAR(4000), "DESERIALIZER_CLASS" VARCHAR(4000), SERDE_TYPE INTEGER);
+ 
+ CREATE TABLE "APP"."PART_PRIVS" ("PART_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."ROLE_MAP" ("ROLE_GRANT_ID" BIGINT NOT NULL, "ADD_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "ROLE_ID" BIGINT);
+ 
+ CREATE TABLE "APP"."TYPES" ("TYPES_ID" BIGINT NOT NULL, "TYPE_NAME" VARCHAR(128), "TYPE1" VARCHAR(767), "TYPE2" VARCHAR(767));
+ 
+ CREATE TABLE "APP"."GLOBAL_PRIVS" ("USER_GRANT_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "USER_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."PARTITION_PARAMS" ("PART_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" VARCHAR(4000));
+ 
+ CREATE TABLE "APP"."PARTITION_EVENTS" (
+     "PART_NAME_ID" BIGINT NOT NULL,
+     "CAT_NAME" VARCHAR(256),
+     "DB_NAME" VARCHAR(128),
+     "EVENT_TIME" BIGINT NOT NULL,
+     "EVENT_TYPE" INTEGER NOT NULL,
+     "PARTITION_NAME" VARCHAR(767),
+     "TBL_NAME" VARCHAR(256)
+ );
+ 
+ CREATE TABLE "APP"."COLUMNS" ("SD_ID" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "COLUMN_NAME" VARCHAR(128) NOT NULL, "TYPE_NAME" VARCHAR(4000) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."ROLES" ("ROLE_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "ROLE_NAME" VARCHAR(128));
+ 
 -CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N');
++CREATE TABLE "APP"."TBLS" ("TBL_ID" BIGINT NOT NULL, "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "LAST_ACCESS_TIME" INTEGER NOT NULL, "OWNER" VARCHAR(767), "OWNER_TYPE" VARCHAR(10), "RETENTION" INTEGER NOT NULL, "SD_ID" BIGINT, "TBL_NAME" VARCHAR(256), "TBL_TYPE" VARCHAR(128), "VIEW_EXPANDED_TEXT" LONG VARCHAR, "VIEW_ORIGINAL_TEXT" LONG VARCHAR, "IS_REWRITE_ENABLED" CHAR(1) NOT NULL DEFAULT 'N',  "WRITE_ID" BIGINT DEFAULT 0);
+ 
+ CREATE TABLE "APP"."PARTITION_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."PART_COL_PRIVS" ("PART_COLUMN_GRANT_ID" BIGINT NOT NULL, "COLUMN_NAME" VARCHAR(767), "CREATE_TIME" INTEGER NOT NULL, "GRANT_OPTION" SMALLINT NOT NULL, "GRANTOR" VARCHAR(128), "GRANTOR_TYPE" VARCHAR(128), "PART_ID" BIGINT, "PRINCIPAL_NAME" VARCHAR(128), "PRINCIPAL_TYPE" VARCHAR(128), "PART_COL_PRIV" VARCHAR(128), "AUTHORIZER" VARCHAR(128));
+ 
+ CREATE TABLE "APP"."SDS" ("SD_ID" BIGINT NOT NULL, "INPUT_FORMAT" VARCHAR(4000), "IS_COMPRESSED" CHAR(1) NOT NULL, "LOCATION" VARCHAR(4000), "NUM_BUCKETS" INTEGER NOT NULL, "OUTPUT_FORMAT" VARCHAR(4000), "SERDE_ID" BIGINT, "CD_ID" BIGINT, "IS_STOREDASSUBDIRECTORIES" CHAR(1) NOT NULL);
+ 
+ CREATE TABLE "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME" VARCHAR(256) NOT NULL, "NEXT_VAL" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."TAB_COL_STATS"(
+     "CAT_NAME" VARCHAR(256) NOT NULL,
+     "DB_NAME" VARCHAR(128) NOT NULL,
+     "TABLE_NAME" VARCHAR(256) NOT NULL,
+     "COLUMN_NAME" VARCHAR(767) NOT NULL,
+     "COLUMN_TYPE" VARCHAR(128) NOT NULL,
+     "LONG_LOW_VALUE" BIGINT,
+     "LONG_HIGH_VALUE" BIGINT,
+     "DOUBLE_LOW_VALUE" DOUBLE,
+     "DOUBLE_HIGH_VALUE" DOUBLE,
+     "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),
+     "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),
+     "NUM_DISTINCTS" BIGINT,
+     "NUM_NULLS" BIGINT NOT NULL,
+     "AVG_COL_LEN" DOUBLE,
+     "MAX_COL_LEN" BIGINT,
+     "NUM_TRUES" BIGINT,
+     "NUM_FALSES" BIGINT,
+     "LAST_ANALYZED" BIGINT,
+     "CS_ID" BIGINT NOT NULL,
+     "TBL_ID" BIGINT NOT NULL,
+     "BIT_VECTOR" BLOB
+ );
+ 
+ CREATE TABLE "APP"."TABLE_PARAMS" ("TBL_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+ 
+ CREATE TABLE "APP"."BUCKETING_COLS" ("SD_ID" BIGINT NOT NULL, "BUCKET_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."TYPE_FIELDS" ("TYPE_NAME" BIGINT NOT NULL, "COMMENT" VARCHAR(256), "FIELD_NAME" VARCHAR(128) NOT NULL, "FIELD_TYPE" VARCHAR(767) NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."NUCLEUS_TABLES" ("CLASS_NAME" VARCHAR(128) NOT NULL, "TABLE_NAME" VARCHAR(128) NOT NULL, "TYPE" VARCHAR(4) NOT NULL, "OWNER" VARCHAR(2) NOT NULL, "VERSION" VARCHAR(20) NOT NULL, "INTERFACE_NAME" VARCHAR(256) DEFAULT NULL);
+ 
+ CREATE TABLE "APP"."SD_PARAMS" ("SD_ID" BIGINT NOT NULL, "PARAM_KEY" VARCHAR(256) NOT NULL, "PARAM_VALUE" CLOB);
+ 
+ CREATE TABLE "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."SKEWED_STRING_LIST_VALUES" ("STRING_LIST_ID" BIGINT NOT NULL, "STRING_LIST_VALUE" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."SKEWED_COL_NAMES" ("SD_ID" BIGINT NOT NULL, "SKEWED_COL_NAME" VARCHAR(256), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ("SD_ID" BIGINT NOT NULL, "STRING_LIST_ID_KID" BIGINT NOT NULL, "LOCATION" VARCHAR(4000));
+ 
+ CREATE TABLE "APP"."SKEWED_VALUES" ("SD_ID_OID" BIGINT NOT NULL, "STRING_LIST_ID_EID" BIGINT NOT NULL, "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."MASTER_KEYS" ("KEY_ID" INTEGER NOT NULL generated always as identity (start with 1), "MASTER_KEY" VARCHAR(767));
+ 
+ CREATE TABLE "APP"."DELEGATION_TOKENS" ( "TOKEN_IDENT" VARCHAR(767) NOT NULL, "TOKEN" VARCHAR(767));
+ 
+ CREATE TABLE "APP"."PART_COL_STATS"(
+     "CAT_NAME" VARCHAR(256) NOT NULL,
+     "DB_NAME" VARCHAR(128) NOT NULL,
+     "TABLE_NAME" VARCHAR(256) NOT NULL,
+     "PARTITION_NAME" VARCHAR(767) NOT NULL,
+     "COLUMN_NAME" VARCHAR(767) NOT NULL,
+     "COLUMN_TYPE" VARCHAR(128) NOT NULL,
+     "LONG_LOW_VALUE" BIGINT,
+     "LONG_HIGH_VALUE" BIGINT,
+     "DOUBLE_LOW_VALUE" DOUBLE,
+     "DOUBLE_HIGH_VALUE" DOUBLE,
+     "BIG_DECIMAL_LOW_VALUE" VARCHAR(4000),
+     "BIG_DECIMAL_HIGH_VALUE" VARCHAR(4000),
+     "NUM_DISTINCTS" BIGINT,
+     "BIT_VECTOR" BLOB,
+     "NUM_NULLS" BIGINT NOT NULL,
+     "AVG_COL_LEN" DOUBLE,
+     "MAX_COL_LEN" BIGINT,
+     "NUM_TRUES" BIGINT,
+     "NUM_FALSES" BIGINT,
+     "LAST_ANALYZED" BIGINT,
+     "CS_ID" BIGINT NOT NULL,
+     "PART_ID" BIGINT NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."VERSION" ("VER_ID" BIGINT NOT NULL, "SCHEMA_VERSION" VARCHAR(127) NOT NULL, "VERSION_COMMENT" VARCHAR(255));
+ 
+ CREATE TABLE "APP"."FUNCS" ("FUNC_ID" BIGINT NOT NULL, "CLASS_NAME" VARCHAR(4000), "CREATE_TIME" INTEGER NOT NULL, "DB_ID" BIGINT, "FUNC_NAME" VARCHAR(128), "FUNC_TYPE" INTEGER NOT NULL, "OWNER_NAME" VARCHAR(128), "OWNER_TYPE" VARCHAR(10));
+ 
+ CREATE TABLE "APP"."FUNC_RU" ("FUNC_ID" BIGINT NOT NULL, "RESOURCE_TYPE" INTEGER NOT NULL, "RESOURCE_URI" VARCHAR(4000), "INTEGER_IDX" INTEGER NOT NULL);
+ 
+ CREATE TABLE "APP"."NOTIFICATION_LOG" (
+     "NL_ID" BIGINT NOT NULL,
+     "CAT_NAME" VARCHAR(256),
+     "DB_NAME" VARCHAR(128),
+     "EVENT_ID" BIGINT NOT NULL,
+     "EVENT_TIME" INTEGER NOT NULL,
+     "EVENT_TYPE" VARCHAR(32) NOT NULL,
+     "MESSAGE" CLOB,
+     "TBL_NAME" VARCHAR(256),
+     "MESSAGE_FORMAT" VARCHAR(16)
+ );
+ 
+ CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."KEY_CONSTRAINTS" ("CHILD_CD_ID" BIGINT, "CHILD_INTEGER_IDX" INTEGER, "CHILD_TBL_ID" BIGINT, "PARENT_CD_ID" BIGINT , "PARENT_INTEGER_IDX" INTEGER, "PARENT_TBL_ID" BIGINT NOT NULL,  "POSITION" BIGINT NOT NULL, "CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "CONSTRAINT_TYPE" SMALLINT NOT NULL, "UPDATE_RULE" SMALLINT, "DELETE_RULE" SMALLINT, "ENABLE_VALIDATE_RELY" SMALLINT NOT NULL, "DEFAULT_VALUE" VARCHAR(400));
+ 
+ CREATE TABLE "APP"."METASTORE_DB_PROPERTIES" ("PROPERTY_KEY" VARCHAR(255) NOT NULL, "PROPERTY_VALUE" VARCHAR(1000) NOT NULL, "DESCRIPTION" VARCHAR(1000));
+ 
+ CREATE TABLE "APP"."WM_RESOURCEPLAN" (RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, QUERY_PARALLELISM INTEGER, STATUS VARCHAR(20) NOT NULL, DEFAULT_POOL_ID BIGINT);
+ 
+ CREATE TABLE "APP"."WM_POOL" (POOL_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, PATH VARCHAR(1024) NOT NULL, ALLOC_FRACTION DOUBLE, QUERY_PARALLELISM INTEGER, SCHEDULING_POLICY VARCHAR(1024));
+ 
+ CREATE TABLE "APP"."WM_TRIGGER" (TRIGGER_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, NAME VARCHAR(128) NOT NULL, TRIGGER_EXPRESSION VARCHAR(1024), ACTION_EXPRESSION VARCHAR(1024), IS_IN_UNMANAGED INTEGER NOT NULL DEFAULT 0);
+ 
+ CREATE TABLE "APP"."WM_POOL_TO_TRIGGER"  (POOL_ID BIGINT NOT NULL, TRIGGER_ID BIGINT NOT NULL);
+ 
+ CREATE TABLE "APP"."WM_MAPPING" (MAPPING_ID BIGINT NOT NULL, RP_ID BIGINT NOT NULL, ENTITY_TYPE VARCHAR(128) NOT NULL, ENTITY_NAME VARCHAR(128) NOT NULL, POOL_ID BIGINT, ORDERING INTEGER);
+ 
+ CREATE TABLE "APP"."MV_CREATION_METADATA" (
+   "MV_CREATION_METADATA_ID" BIGINT NOT NULL,
+   "CAT_NAME" VARCHAR(256) NOT NULL,
+   "DB_NAME" VARCHAR(128) NOT NULL,
+   "TBL_NAME" VARCHAR(256) NOT NULL,
+   "TXN_LIST" CLOB
+ );
+ 
+ CREATE TABLE "APP"."MV_TABLES_USED" (
+   "MV_CREATION_METADATA_ID" BIGINT NOT NULL,
+   "TBL_ID" BIGINT NOT NULL
+ );
+ 
+ CREATE TABLE "APP"."CTLGS" (
+     "CTLG_ID" BIGINT NOT NULL,
+     "NAME" VARCHAR(256) UNIQUE,
+     "DESC" VARCHAR(4000),
+     "LOCATION_URI" VARCHAR(4000) NOT NULL);
+ 
+ -- ----------------------------------------------
+ -- DML Statements
+ -- ----------------------------------------------
+ 
+ INSERT INTO "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID", "NEXT_EVENT_ID") SELECT * FROM (VALUES (1,1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_EVENT_ID" FROM "APP"."NOTIFICATION_SEQUENCE");
+ 
+ INSERT INTO "APP"."SEQUENCE_TABLE" ("SEQUENCE_NAME", "NEXT_VAL") SELECT * FROM (VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1)) tmp_table WHERE NOT EXISTS ( SELECT "NEXT_VAL" FROM "APP"."SEQUENCE_TABLE" WHERE "SEQUENCE_NAME" = 'org.apache.hadoop.hive.metastore.model.MNotificationLog');
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for indexes
+ -- ----------------------------------------------
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUEINDEX" ON "APP"."IDXS" ("INDEX_NAME", "ORIG_TBL_ID");
+ 
+ CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("AUTHORIZER", "TBL_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("AUTHORIZER", "DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("CAT_NAME", "DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME");
+ 
+ CREATE INDEX "APP"."TAB_COL_STATS_IDX" ON "APP"."TAB_COL_STATS" ("CAT_NAME", "DB_NAME", "TABLE_NAME", "COLUMN_NAME");
+ 
+ CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("AUTHORIZER", "PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME");
+ 
+ CREATE INDEX "APP"."TABLEPRIVILEGEINDEX" ON "APP"."TBL_PRIVS" ("AUTHORIZER", "TBL_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "TBL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUETABLE" ON "APP"."TBLS" ("TBL_NAME", "DB_ID");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_DATABASE" ON "APP"."DBS" ("NAME", "CTLG_NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."USERROLEMAPINDEX" ON "APP"."ROLE_MAP" ("PRINCIPAL_NAME", "ROLE_ID", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."GLOBALPRIVILEGEINDEX" ON "APP"."GLOBAL_PRIVS" ("AUTHORIZER", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "USER_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_TYPE" ON "APP"."TYPES" ("TYPE_NAME");
+ 
+ CREATE INDEX "APP"."PARTITIONCOLUMNPRIVILEGEINDEX" ON "APP"."PART_COL_PRIVS" ("AUTHORIZER", "PART_ID", "COLUMN_NAME", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_COL_PRIV", "GRANTOR", "GRANTOR_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUEPARTITION" ON "APP"."PARTITIONS" ("PART_NAME", "TBL_ID");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUEFUNCTION" ON "APP"."FUNCS" ("FUNC_NAME", "DB_ID");
+ 
+ CREATE INDEX "APP"."FUNCS_N49" ON "APP"."FUNCS" ("DB_ID");
+ 
+ CREATE INDEX "APP"."FUNC_RU_N49" ON "APP"."FUNC_RU" ("FUNC_ID");
+ 
+ CREATE INDEX "APP"."CONSTRAINTS_PARENT_TBL_ID_INDEX" ON "APP"."KEY_CONSTRAINTS"("PARENT_TBL_ID");
+ 
+ CREATE INDEX "APP"."CONSTRAINTS_CONSTRAINT_TYPE_INDEX" ON "APP"."KEY_CONSTRAINTS"("CONSTRAINT_TYPE");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_RESOURCEPLAN" ON "APP"."WM_RESOURCEPLAN" ("NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_POOL" ON "APP"."WM_POOL" ("RP_ID", "PATH");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_TRIGGER" ON "APP"."WM_TRIGGER" ("RP_ID", "NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_WM_MAPPING" ON "APP"."WM_MAPPING" ("RP_ID", "ENTITY_TYPE", "ENTITY_NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."MV_UNIQUE_TABLE" ON "APP"."MV_CREATION_METADATA" ("TBL_NAME", "DB_NAME");
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_CATALOG" ON "APP"."CTLGS" ("NAME");
+ 
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for keys
+ -- ----------------------------------------------
+ 
+ -- primary/unique
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_PK" PRIMARY KEY ("INDEX_ID");
+ 
+ ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_PK" PRIMARY KEY ("TBL_COLUMN_GRANT_ID");
+ 
+ ALTER TABLE "APP"."CDS" ADD CONSTRAINT "SQL110922153006460" PRIMARY KEY ("CD_ID");
+ 
+ ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_PK" PRIMARY KEY ("DB_GRANT_ID");
+ 
+ ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_PK" PRIMARY KEY ("INDEX_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME");
+ 
+ ALTER TABLE "APP"."SEQUENCE_TABLE" ADD CONSTRAINT "SEQUENCE_TABLE_PK" PRIMARY KEY ("SEQUENCE_NAME");
+ 
+ ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_PK" PRIMARY KEY ("PART_GRANT_ID");
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_PK" PRIMARY KEY ("SD_ID");
+ 
+ ALTER TABLE "APP"."SERDES" ADD CONSTRAINT "SERDES_PK" PRIMARY KEY ("SERDE_ID");
+ 
+ ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_PK" PRIMARY KEY ("SD_ID", "COLUMN_NAME");
+ 
+ ALTER TABLE "APP"."PARTITION_EVENTS" ADD CONSTRAINT "PARTITION_EVENTS_PK" PRIMARY KEY ("PART_NAME_ID");
+ 
+ ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_PK" PRIMARY KEY ("TYPE_NAME", "FIELD_NAME");
+ 
+ ALTER TABLE "APP"."ROLES" ADD CONSTRAINT "ROLES_PK" PRIMARY KEY ("ROLE_ID");
+ 
+ ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_PK" PRIMARY KEY ("TBL_GRANT_ID");
+ 
+ ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_PK" PRIMARY KEY ("SERDE_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."NUCLEUS_TABLES" ADD CONSTRAINT "NUCLEUS_TABLES_PK" PRIMARY KEY ("CLASS_NAME");
+ 
+ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_PK" PRIMARY KEY ("TBL_ID");
+ 
+ ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_PK" PRIMARY KEY ("SD_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_PK" PRIMARY KEY ("DB_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_PK" PRIMARY KEY ("DB_ID");
+ 
+ ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_PK" PRIMARY KEY ("ROLE_GRANT_ID");
+ 
+ ALTER TABLE "APP"."GLOBAL_PRIVS" ADD CONSTRAINT "GLOBAL_PRIVS_PK" PRIMARY KEY ("USER_GRANT_ID");
+ 
+ ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_PK" PRIMARY KEY ("PART_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."TYPES" ADD CONSTRAINT "TYPES_PK" PRIMARY KEY ("TYPES_ID");
+ 
+ ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "SQL110922153006740" PRIMARY KEY ("CD_ID", "COLUMN_NAME");
+ 
+ ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_PK" PRIMARY KEY ("PART_COLUMN_GRANT_ID");
+ 
+ ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_PK" PRIMARY KEY ("PART_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_PK" PRIMARY KEY ("PART_ID");
+ 
+ ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_PK" PRIMARY KEY ("TBL_ID", "PARAM_KEY");
+ 
+ ALTER TABLE "APP"."SKEWED_STRING_LIST" ADD CONSTRAINT "SKEWED_STRING_LIST_PK" PRIMARY KEY ("STRING_LIST_ID");
+ 
+ ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_PK" PRIMARY KEY ("STRING_LIST_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_PK" PRIMARY KEY ("SD_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_PK" PRIMARY KEY ("SD_ID", "STRING_LIST_ID_KID");
+ 
+ ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_PK" PRIMARY KEY ("SD_ID_OID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+ 
+ ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_PK" PRIMARY KEY ("CS_ID");
+ 
+ ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_PK" PRIMARY KEY ("FUNC_ID");
+ 
+ ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_PK" PRIMARY KEY ("FUNC_ID", "INTEGER_IDX");
+ 
+ ALTER TABLE "APP"."NOTIFICATION_LOG" ADD CONSTRAINT "NOTIFICATION_LOG_PK" PRIMARY KEY ("NL_ID");
+ 
+ ALTER TABLE "APP"."NOTIFICATION_SEQUENCE" ADD CONSTRAINT "NOTIFICATION_SEQUENCE_PK" PRIMARY KEY ("NNI_ID");
+ 
+ ALTER TABLE "APP"."KEY_CONSTRAINTS" ADD CONSTRAINT "CONSTRAINTS_PK" PRIMARY KEY ("CONSTRAINT_NAME", "POSITION");
+ 
+ ALTER TABLE "APP"."METASTORE_DB_PROPERTIES" ADD CONSTRAINT "PROPERTY_KEY_PK" PRIMARY KEY ("PROPERTY_KEY");
+ 
+ ALTER TABLE "APP"."MV_CREATION_METADATA" ADD CONSTRAINT "MV_CREATION_METADATA_PK" PRIMARY KEY ("MV_CREATION_METADATA_ID");
+ 
+ ALTER TABLE "APP"."CTLGS" ADD CONSTRAINT "CTLG_PK" PRIMARY KEY ("CTLG_ID");
+ 
+ 
+ -- foreign
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK1" FOREIGN KEY ("ORIG_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "IDXS_FK3" FOREIGN KEY ("INDEX_TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBL_COL_PRIVS" ADD CONSTRAINT "TBL_COL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DB_PRIVS" ADD CONSTRAINT "DB_PRIVS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."INDEX_PARAMS" ADD CONSTRAINT "INDEX_PARAMS_FK1" FOREIGN KEY ("INDEX_ID") REFERENCES "APP"."IDXS" ("INDEX_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITION_KEYS" ADD CONSTRAINT "PARTITION_KEYS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PART_PRIVS" ADD CONSTRAINT "PART_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SDS_FK2" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."COLUMNS" ADD CONSTRAINT "COLUMNS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TYPE_FIELDS" ADD CONSTRAINT "TYPE_FIELDS_FK1" FOREIGN KEY ("TYPE_NAME") REFERENCES "APP"."TYPES" ("TYPES_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBL_PRIVS" ADD CONSTRAINT "TBL_PRIVS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SERDE_PARAMS" ADD CONSTRAINT "SERDE_PARAMS_FK1" FOREIGN KEY ("SERDE_ID") REFERENCES "APP"."SERDES" ("SERDE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TBLS" ADD CONSTRAINT "TBLS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SD_PARAMS" ADD CONSTRAINT "SD_PARAMS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DATABASE_PARAMS" ADD CONSTRAINT "DATABASE_PARAMS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."ROLE_MAP" ADD CONSTRAINT "ROLE_MAP_FK1" FOREIGN KEY ("ROLE_ID") REFERENCES "APP"."ROLES" ("ROLE_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."BUCKETING_COLS" ADD CONSTRAINT "BUCKETING_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SORT_COLS" ADD CONSTRAINT "SORT_COLS_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITION_KEY_VALS" ADD CONSTRAINT "PARTITION_KEY_VALS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."COLUMNS_V2" ADD CONSTRAINT "COLUMNS_V2_FK1" FOREIGN KEY ("CD_ID") REFERENCES "APP"."CDS" ("CD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PART_COL_PRIVS" ADD CONSTRAINT "PART_COL_PRIVS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITION_PARAMS" ADD CONSTRAINT "PARTITION_PARAMS_FK1" FOREIGN KEY ("PART_ID") REFERENCES "APP"."PARTITIONS" ("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PARTITIONS" ADD CONSTRAINT "PARTITIONS_FK2" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TABLE_PARAMS" ADD CONSTRAINT "TABLE_PARAMS_FK1" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_STRING_LIST_VALUES" ADD CONSTRAINT "SKEWED_STRING_LIST_VALUES_FK1" FOREIGN KEY ("STRING_LIST_ID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_COL_NAMES" ADD CONSTRAINT "SKEWED_COL_NAMES_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK1" FOREIGN KEY ("SD_ID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_COL_VALUE_LOC_MAP" ADD CONSTRAINT "SKEWED_COL_VALUE_LOC_MAP_FK2" FOREIGN KEY ("STRING_LIST_ID_KID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK1" FOREIGN KEY ("SD_ID_OID") REFERENCES "APP"."SDS" ("SD_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."SKEWED_VALUES" ADD CONSTRAINT "SKEWED_VALUES_FK2" FOREIGN KEY ("STRING_LIST_ID_EID") REFERENCES "APP"."SKEWED_STRING_LIST" ("STRING_LIST_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."TAB_COL_STATS" ADD CONSTRAINT "TAB_COL_STATS_FK" FOREIGN KEY ("TBL_ID") REFERENCES TBLS("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."PART_COL_STATS" ADD CONSTRAINT "PART_COL_STATS_FK" FOREIGN KEY ("PART_ID") REFERENCES PARTITIONS("PART_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."VERSION" ADD CONSTRAINT "VERSION_PK" PRIMARY KEY ("VER_ID");
+ 
+ ALTER TABLE "APP"."FUNCS" ADD CONSTRAINT "FUNCS_FK1" FOREIGN KEY ("DB_ID") REFERENCES "APP"."DBS" ("DB_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."FUNC_RU" ADD CONSTRAINT "FUNC_RU_FK1" FOREIGN KEY ("FUNC_ID") REFERENCES "APP"."FUNCS" ("FUNC_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_PK" PRIMARY KEY ("RP_ID");
+ 
+ ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_PK" PRIMARY KEY ("POOL_ID");
+ 
+ ALTER TABLE "APP"."WM_POOL" ADD CONSTRAINT "WM_POOL_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_RESOURCEPLAN" ADD CONSTRAINT "WM_RESOURCEPLAN_FK1" FOREIGN KEY ("DEFAULT_POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_PK" PRIMARY KEY ("TRIGGER_ID");
+ 
+ ALTER TABLE "APP"."WM_TRIGGER" ADD CONSTRAINT "WM_TRIGGER_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK1" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_POOL_TO_TRIGGER" ADD CONSTRAINT "WM_POOL_TO_TRIGGER_FK2" FOREIGN KEY ("TRIGGER_ID") REFERENCES "APP"."WM_TRIGGER" ("TRIGGER_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_PK" PRIMARY KEY ("MAPPING_ID");
+ 
+ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK1" FOREIGN KEY ("RP_ID") REFERENCES "APP"."WM_RESOURCEPLAN" ("RP_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."WM_MAPPING" ADD CONSTRAINT "WM_MAPPING_FK2" FOREIGN KEY ("POOL_ID") REFERENCES "APP"."WM_POOL" ("POOL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK1" FOREIGN KEY ("MV_CREATION_METADATA_ID") REFERENCES "APP"."MV_CREATION_METADATA" ("MV_CREATION_METADATA_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."MV_TABLES_USED" ADD CONSTRAINT "MV_TABLES_USED_FK2" FOREIGN KEY ("TBL_ID") REFERENCES "APP"."TBLS" ("TBL_ID") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ ALTER TABLE "APP"."DBS" ADD CONSTRAINT "DBS_CTLG_FK" FOREIGN KEY ("CTLG_NAME") REFERENCES "APP"."CTLGS" ("NAME") ON DELETE NO ACTION ON UPDATE NO ACTION;
+ 
+ -- ----------------------------------------------
+ -- DDL Statements for checks
+ -- ----------------------------------------------
+ 
+ ALTER TABLE "APP"."IDXS" ADD CONSTRAINT "SQL110318025504980" CHECK (DEFERRED_REBUILD IN ('Y','N'));
+ 
+ ALTER TABLE "APP"."SDS" ADD CONSTRAINT "SQL110318025505550" CHECK (IS_COMPRESSED IN ('Y','N'));
+ 
+ -- ----------------------------
+ -- Transaction and Lock Tables
+ -- ----------------------------
+ CREATE TABLE TXNS (
+   TXN_ID bigint PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED bigint NOT NULL,
+   TXN_LAST_HEARTBEAT bigint NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar(128),
+   TXN_META_INFO varchar(128),
+   TXN_HEARTBEAT_COUNT integer,
+   TXN_TYPE integer
+ );
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID bigint NOT NULL REFERENCES TXNS (TXN_ID),
+   TC_DATABASE varchar(128) NOT NULL,
+   TC_TABLE varchar(128),
+   TC_PARTITION varchar(767),
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID bigint
+ );
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID bigint NOT NULL,
+   CTC_DATABASE varchar(128) NOT NULL,
+   CTC_TABLE varchar(256),
+   CTC_PARTITION varchar(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID bigint
+ );
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_TXN_ID VALUES(1);
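+ 
+ -- Illustrative sketch only, not part of the schema: transaction ids are handed out
+ -- by reading and then advancing NEXT_TXN_ID within a single database transaction;
+ -- a request for a batch of 5 ids would look roughly like:
+ --   SELECT NTXN_NEXT FROM NEXT_TXN_ID;
+ --   UPDATE NEXT_TXN_ID SET NTXN_NEXT = NTXN_NEXT + 5;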
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID bigint NOT NULL,
+   HL_LOCK_INT_ID bigint NOT NULL,
+   HL_TXNID bigint NOT NULL,
+   HL_DB varchar(128) NOT NULL,
+   HL_TABLE varchar(128),
+   HL_PARTITION varchar(767),
+   HL_LOCK_STATE char(1) NOT NULL,
+   HL_LOCK_TYPE char(1) NOT NULL,
+   HL_LAST_HEARTBEAT bigint NOT NULL,
+   HL_ACQUIRED_AT bigint,
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT integer,
+   HL_AGENT_INFO varchar(128),
+   HL_BLOCKEDBY_EXT_ID bigint,
+   HL_BLOCKEDBY_INT_ID bigint,
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID)
+ );
+ 
+ CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID bigint PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START bigint,
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID bigint,
+   CQ_META_INFO varchar(2048) for bit data,
+   CQ_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT bigint NOT NULL
+ );
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID bigint PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START bigint,
+   CC_END bigint,
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID bigint,
+   CC_META_INFO varchar(2048) for bit data,
+   CC_HADOOP_JOB_ID varchar(32)
+ );
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT varchar(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ );
+ 
+ -- The first 4 columns form a logical primary key, but since WS_PARTITION is nullable such a PK cannot be declared.
+ -- This table is a good candidate for an index-organized table.
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar(128) NOT NULL,
+   WS_TABLE varchar(128) NOT NULL,
+   WS_PARTITION varchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
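+ 
+ -- Illustrative sketch (an assumption about usage, not part of the schema): at commit
+ -- time, write-write conflicts can be detected by probing WRITE_SET for an entry on the
+ -- same entity committed by a concurrent transaction, roughly:
+ --   SELECT WS_TXNID FROM WRITE_SET
+ --    WHERE WS_DATABASE = ? AND WS_TABLE = ? AND WS_COMMIT_ID > ?;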
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE varchar(128) NOT NULL,
+   T2W_TABLE varchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE varchar(128) NOT NULL,
+   NWI_TABLE varchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
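+ 
+ -- Illustrative sketch only, not part of the schema: per-table write ids are allocated
+ -- by advancing NEXT_WRITE_ID, and the resulting txn-to-write-id mapping can then be
+ -- looked up through the unique indexes above, e.g.:
+ --   UPDATE NEXT_WRITE_ID SET NWI_NEXT = NWI_NEXT + 1 WHERE NWI_DATABASE = ? AND NWI_TABLE = ?;
+ --   SELECT T2W_WRITEID FROM TXN_TO_WRITE_ID WHERE T2W_DATABASE = ? AND T2W_TABLE = ? AND T2W_TXNID = ?;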
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
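+ 
+ -- Illustrative sketch (an assumption about usage): the lowest transaction id still
+ -- visible to any open transaction can be derived with:
+ --   SELECT MIN(MHL_MIN_OPEN_TXNID) FROM MIN_HISTORY_LEVEL;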
+ 
+ CREATE TABLE "APP"."I_SCHEMA" (
+   "SCHEMA_ID" bigint primary key,
+   "SCHEMA_TYPE" integer not null,
+   "NAME" varchar(256) unique,
+   "DB_ID" bigint references "APP"."DBS" ("DB_ID"),
+   "COMPATIBILITY" integer not null,
+   "VALIDATION_LEVEL" integer not null,
+   "CAN_EVOLVE" char(1) not null,
+   "SCHEMA_GROUP" varchar(256),
+   "DESCRIPTION" varchar(4000)
+ );
+ 
+ CREATE TABLE "APP"."SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" bigint primary key,
+   "SCHEMA_ID" bigint references "APP"."I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" integer not null,
+   "CREATED_AT" bigint not null,
+   "CD_ID" bigint references "APP"."CDS" ("CD_ID"),
+   "STATE" integer not null,
+   "DESCRIPTION" varchar(4000),
+   "SCHEMA_TEXT" clob,
+   "FINGERPRINT" varchar(256),
+   "SCHEMA_VERSION_NAME" varchar(256),
+   "SERDE_ID" bigint references "APP"."SERDES" ("SERDE_ID")
+ );
+ 
+ CREATE UNIQUE INDEX "APP"."UNIQUE_SCHEMA_VERSION" ON "APP"."SCHEMA_VERSION" ("SCHEMA_ID", "VERSION");
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ );
+ 
+ CREATE TABLE "APP"."RUNTIME_STATS" (
+   "RS_ID" bigint primary key,
+   "CREATE_TIME" integer not null,
+   "WEIGHT" integer not null,
+   "PAYLOAD" BLOB
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID bigint NOT NULL,
+   WNL_TXNID bigint NOT NULL,
+   WNL_WRITEID bigint NOT NULL,
+   WNL_DATABASE varchar(128) NOT NULL,
+   WNL_TABLE varchar(128) NOT NULL,
+   WNL_PARTITION varchar(1024) NOT NULL,
+   WNL_TABLE_OBJ clob NOT NULL,
+   WNL_PARTITION_OBJ clob,
+   WNL_FILES clob,
+   WNL_EVENT_TIME integer NOT NULL,
+   PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
+ );
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO "APP"."VERSION" (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
index 0000000,a511376..d4fb299
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql
@@@ -1,0 -1,6 +1,8 @@@
+ -- Upgrade MetaStore schema from 3.1.0 to 4.0.0
 -
++-- HIVE-19416
++ALTER TABLE "APP"."TBLS" ADD WRITE_ID bigint DEFAULT 0;
++ALTER TABLE "APP"."PARTITIONS" ADD WRITE_ID bigint DEFAULT 0;
+ 
+ -- This needs to be the last thing done.  Insert any changes above this line.
+ UPDATE "APP".VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ 
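+ -- Illustrative, not part of the script: this upgrade is normally applied through
+ -- Hive's schematool rather than run by hand, e.g.:
+ --   schematool -dbType derby -upgradeSchemaFrom 3.1.0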

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
index 0000000,a81fc40..ad78ba6
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mssql/hive-schema-4.0.0.mssql.sql
@@@ -1,0 -1,1272 +1,1272 @@@
+ -- Licensed to the Apache Software Foundation (ASF) under one or more
+ -- contributor license agreements.  See the NOTICE file distributed with
+ -- this work for additional information regarding copyright ownership.
+ -- The ASF licenses this file to You under the Apache License, Version 2.0
+ -- (the "License"); you may not use this file except in compliance with
+ -- the License.  You may obtain a copy of the License at
+ --
+ --     http://www.apache.org/licenses/LICENSE-2.0
+ --
+ -- Unless required by applicable law or agreed to in writing, software
+ -- distributed under the License is distributed on an "AS IS" BASIS,
+ -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ -- See the License for the specific language governing permissions and
+ -- limitations under the License.
+ 
+ ------------------------------------------------------------------
+ -- DataNucleus SchemaTool (ran at 08/04/2014 15:10:15)
+ ------------------------------------------------------------------
+ -- Complete schema required for the following classes:-
+ --     org.apache.hadoop.hive.metastore.model.MColumnDescriptor
+ --     org.apache.hadoop.hive.metastore.model.MDBPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MDatabase
+ --     org.apache.hadoop.hive.metastore.model.MDelegationToken
+ --     org.apache.hadoop.hive.metastore.model.MFieldSchema
+ --     org.apache.hadoop.hive.metastore.model.MFunction
+ --     org.apache.hadoop.hive.metastore.model.MGlobalPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MIndex
+ --     org.apache.hadoop.hive.metastore.model.MMasterKey
+ --     org.apache.hadoop.hive.metastore.model.MOrder
+ --     org.apache.hadoop.hive.metastore.model.MPartition
+ --     org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics
+ --     org.apache.hadoop.hive.metastore.model.MPartitionEvent
+ --     org.apache.hadoop.hive.metastore.model.MPartitionPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MResourceUri
+ --     org.apache.hadoop.hive.metastore.model.MRole
+ --     org.apache.hadoop.hive.metastore.model.MRoleMap
+ --     org.apache.hadoop.hive.metastore.model.MSerDeInfo
+ --     org.apache.hadoop.hive.metastore.model.MStorageDescriptor
+ --     org.apache.hadoop.hive.metastore.model.MStringList
+ --     org.apache.hadoop.hive.metastore.model.MTable
+ --     org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege
+ --     org.apache.hadoop.hive.metastore.model.MTableColumnStatistics
+ --     org.apache.hadoop.hive.metastore.model.MTablePrivilege
+ --     org.apache.hadoop.hive.metastore.model.MType
+ --     org.apache.hadoop.hive.metastore.model.MVersionTable
+ --
+ -- Table MASTER_KEYS for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+ CREATE TABLE MASTER_KEYS
+ (
+     KEY_ID int NOT NULL,
+     MASTER_KEY nvarchar(767) NULL
+ );
+ 
+ ALTER TABLE MASTER_KEYS ADD CONSTRAINT MASTER_KEYS_PK PRIMARY KEY (KEY_ID);
+ 
+ -- Table IDXS for classes [org.apache.hadoop.hive.metastore.model.MIndex]
+ CREATE TABLE IDXS
+ (
+     INDEX_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     DEFERRED_REBUILD bit NOT NULL,
+     INDEX_HANDLER_CLASS nvarchar(4000) NULL,
+     INDEX_NAME nvarchar(128) NULL,
+     INDEX_TBL_ID bigint NULL,
+     LAST_ACCESS_TIME int NOT NULL,
+     ORIG_TBL_ID bigint NULL,
+     SD_ID bigint NULL
+ );
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_PK PRIMARY KEY (INDEX_ID);
+ 
+ -- Table PART_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+ CREATE TABLE PART_COL_STATS
+ (
+     CS_ID bigint NOT NULL,
+     AVG_COL_LEN float NULL,
+     "COLUMN_NAME" nvarchar(767) NOT NULL,
+     COLUMN_TYPE nvarchar(128) NOT NULL,
+     DB_NAME nvarchar(128) NOT NULL,
+     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+     BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+     DOUBLE_HIGH_VALUE float NULL,
+     DOUBLE_LOW_VALUE float NULL,
+     LAST_ANALYZED bigint NOT NULL,
+     LONG_HIGH_VALUE bigint NULL,
+     LONG_LOW_VALUE bigint NULL,
+     MAX_COL_LEN bigint NULL,
+     NUM_DISTINCTS bigint NULL,
+     BIT_VECTOR varbinary(max) NULL,
+     NUM_FALSES bigint NULL,
+     NUM_NULLS bigint NOT NULL,
+     NUM_TRUES bigint NULL,
+     PART_ID bigint NULL,
+     PARTITION_NAME nvarchar(767) NOT NULL,
+     "TABLE_NAME" nvarchar(256) NOT NULL,
+     "CAT_NAME" nvarchar(256) NOT NULL
+ );
+ 
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_PK PRIMARY KEY (CS_ID);
+ 
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME);
+ 
+ -- Table PART_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ CREATE TABLE PART_PRIVS
+ (
+     PART_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PART_ID bigint NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     PART_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_PK PRIMARY KEY (PART_GRANT_ID);
+ 
+ -- Table SKEWED_STRING_LIST for classes [org.apache.hadoop.hive.metastore.model.MStringList]
+ CREATE TABLE SKEWED_STRING_LIST
+ (
+     STRING_LIST_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST ADD CONSTRAINT SKEWED_STRING_LIST_PK PRIMARY KEY (STRING_LIST_ID);
+ 
+ -- Table ROLES for classes [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE TABLE ROLES
+ (
+     ROLE_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     OWNER_NAME nvarchar(128) NULL,
+     ROLE_NAME nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE ROLES ADD CONSTRAINT ROLES_PK PRIMARY KEY (ROLE_ID);
+ 
+ -- Table PARTITIONS for classes [org.apache.hadoop.hive.metastore.model.MPartition]
+ CREATE TABLE PARTITIONS
+ (
+     PART_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     LAST_ACCESS_TIME int NOT NULL,
+     PART_NAME nvarchar(767) NULL,
+     SD_ID bigint NULL,
 -    TBL_ID bigint NULL
 -);
++    TBL_ID bigint NULL,
++    WRITE_ID bigint NULL);
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_PK PRIMARY KEY (PART_ID);
+ 
+ -- Table CDS for classes [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+ CREATE TABLE CDS
+ (
+     CD_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE CDS ADD CONSTRAINT CDS_PK PRIMARY KEY (CD_ID);
+ 
+ -- Table VERSION for classes [org.apache.hadoop.hive.metastore.model.MVersionTable]
+ CREATE TABLE VERSION
+ (
+     VER_ID bigint NOT NULL,
+     SCHEMA_VERSION nvarchar(127) NOT NULL,
+     VERSION_COMMENT nvarchar(255) NOT NULL
+ );
+ 
+ ALTER TABLE VERSION ADD CONSTRAINT VERSION_PK PRIMARY KEY (VER_ID);
+ 
+ -- Table GLOBAL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE TABLE GLOBAL_PRIVS
+ (
+     USER_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     USER_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE GLOBAL_PRIVS ADD CONSTRAINT GLOBAL_PRIVS_PK PRIMARY KEY (USER_GRANT_ID);
+ 
+ -- Table PART_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ CREATE TABLE PART_COL_PRIVS
+ (
+     PART_COLUMN_GRANT_ID bigint NOT NULL,
+     "COLUMN_NAME" nvarchar(767) NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PART_ID bigint NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     PART_COL_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_PK PRIMARY KEY (PART_COLUMN_GRANT_ID);
+ 
+ -- Table DB_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ CREATE TABLE DB_PRIVS
+ (
+     DB_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     DB_ID bigint NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     DB_PRIV nvarchar(128) NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_PK PRIMARY KEY (DB_GRANT_ID);
+ 
+ -- Table TAB_COL_STATS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+ CREATE TABLE TAB_COL_STATS
+ (
+     CS_ID bigint NOT NULL,
+     AVG_COL_LEN float NULL,
+     "COLUMN_NAME" nvarchar(767) NOT NULL,
+     COLUMN_TYPE nvarchar(128) NOT NULL,
+     DB_NAME nvarchar(128) NOT NULL,
+     BIG_DECIMAL_HIGH_VALUE nvarchar(255) NULL,
+     BIG_DECIMAL_LOW_VALUE nvarchar(255) NULL,
+     DOUBLE_HIGH_VALUE float NULL,
+     DOUBLE_LOW_VALUE float NULL,
+     LAST_ANALYZED bigint NOT NULL,
+     LONG_HIGH_VALUE bigint NULL,
+     LONG_LOW_VALUE bigint NULL,
+     MAX_COL_LEN bigint NULL,
+     NUM_DISTINCTS bigint NULL,
+     BIT_VECTOR varbinary(max) NULL,
+     NUM_FALSES bigint NULL,
+     NUM_NULLS bigint NOT NULL,
+     NUM_TRUES bigint NULL,
+     TBL_ID bigint NULL,
+     "TABLE_NAME" nvarchar(256) NOT NULL,
+     "CAT_NAME" nvarchar(256) NOT NULL
+ );
+ 
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_PK PRIMARY KEY (CS_ID);
+ CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME);
+ 
+ 
+ -- Table TYPES for classes [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE TABLE TYPES
+ (
+     TYPES_ID bigint NOT NULL,
+     TYPE_NAME nvarchar(128) NULL,
+     TYPE1 nvarchar(767) NULL,
+     TYPE2 nvarchar(767) NULL
+ );
+ 
+ ALTER TABLE TYPES ADD CONSTRAINT TYPES_PK PRIMARY KEY (TYPES_ID);
+ 
+ -- Table TBL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ CREATE TABLE TBL_PRIVS
+ (
+     TBL_GRANT_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     TBL_PRIV nvarchar(128) NULL,
+     TBL_ID bigint NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_PK PRIMARY KEY (TBL_GRANT_ID);
+ 
+ -- Table DBS for classes [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE TABLE DBS
+ (
+     DB_ID bigint NOT NULL,
+     "DESC" nvarchar(4000) NULL,
+     DB_LOCATION_URI nvarchar(4000) NOT NULL,
+     "NAME" nvarchar(128) NULL,
+     OWNER_NAME nvarchar(128) NULL,
+     OWNER_TYPE nvarchar(10) NULL,
+     CTLG_NAME nvarchar(256)
+ );
+ 
+ ALTER TABLE DBS ADD CONSTRAINT DBS_PK PRIMARY KEY (DB_ID);
+ 
+ -- Table TBL_COL_PRIVS for classes [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ CREATE TABLE TBL_COL_PRIVS
+ (
+     TBL_COLUMN_GRANT_ID bigint NOT NULL,
+     "COLUMN_NAME" nvarchar(767) NULL,
+     CREATE_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     TBL_COL_PRIV nvarchar(128) NULL,
+     TBL_ID bigint NULL,
+     AUTHORIZER nvarchar(128) NULL
+ );
+ 
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_PK PRIMARY KEY (TBL_COLUMN_GRANT_ID);
+ 
+ -- Table DELEGATION_TOKENS for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+ CREATE TABLE DELEGATION_TOKENS
+ (
+     TOKEN_IDENT nvarchar(767) NOT NULL,
+     TOKEN nvarchar(767) NULL
+ );
+ 
+ ALTER TABLE DELEGATION_TOKENS ADD CONSTRAINT DELEGATION_TOKENS_PK PRIMARY KEY (TOKEN_IDENT);
+ 
+ -- Table SERDES for classes [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ CREATE TABLE SERDES
+ (
+     SERDE_ID bigint NOT NULL,
+     "NAME" nvarchar(128) NULL,
+     SLIB nvarchar(4000) NULL,
+     "DESCRIPTION" nvarchar(4000),
+     "SERIALIZER_CLASS" nvarchar(4000),
+     "DESERIALIZER_CLASS" nvarchar(4000),
+     "SERDE_TYPE" int
+ );
+ 
+ ALTER TABLE SERDES ADD CONSTRAINT SERDES_PK PRIMARY KEY (SERDE_ID);
+ 
+ -- Table FUNCS for classes [org.apache.hadoop.hive.metastore.model.MFunction]
+ CREATE TABLE FUNCS
+ (
+     FUNC_ID bigint NOT NULL,
+     CLASS_NAME nvarchar(4000) NULL,
+     CREATE_TIME int NOT NULL,
+     DB_ID bigint NULL,
+     FUNC_NAME nvarchar(128) NULL,
+     FUNC_TYPE int NOT NULL,
+     OWNER_NAME nvarchar(128) NULL,
+     OWNER_TYPE nvarchar(10) NULL
+ );
+ 
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_PK PRIMARY KEY (FUNC_ID);
+ 
+ -- Table ROLE_MAP for classes [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ CREATE TABLE ROLE_MAP
+ (
+     ROLE_GRANT_ID bigint NOT NULL,
+     ADD_TIME int NOT NULL,
+     GRANT_OPTION smallint NOT NULL CHECK (GRANT_OPTION IN (0,1)),
+     GRANTOR nvarchar(128) NULL,
+     GRANTOR_TYPE nvarchar(128) NULL,
+     PRINCIPAL_NAME nvarchar(128) NULL,
+     PRINCIPAL_TYPE nvarchar(128) NULL,
+     ROLE_ID bigint NULL
+ );
+ 
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_PK PRIMARY KEY (ROLE_GRANT_ID);
+ 
+ -- Table TBLS for classes [org.apache.hadoop.hive.metastore.model.MTable]
+ CREATE TABLE TBLS
+ (
+     TBL_ID bigint NOT NULL,
+     CREATE_TIME int NOT NULL,
+     DB_ID bigint NULL,
+     LAST_ACCESS_TIME int NOT NULL,
+     OWNER nvarchar(767) NULL,
+     OWNER_TYPE nvarchar(10) NULL,
+     RETENTION int NOT NULL,
+     SD_ID bigint NULL,
+     TBL_NAME nvarchar(256) NULL,
+     TBL_TYPE nvarchar(128) NULL,
+     VIEW_EXPANDED_TEXT text NULL,
+     VIEW_ORIGINAL_TEXT text NULL,
 -    IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0
 -);
++    IS_REWRITE_ENABLED bit NOT NULL DEFAULT 0,
++    WRITE_ID bigint NULL);
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_PK PRIMARY KEY (TBL_ID);
+ 
+ -- Table MV_CREATION_METADATA for classes [org.apache.hadoop.hive.metastore.model.MCreationMetadata]
+ CREATE TABLE MV_CREATION_METADATA
+ (
+     MV_CREATION_METADATA_ID bigint NOT NULL,
+     CAT_NAME nvarchar(256) NOT NULL,
+     DB_NAME nvarchar(128) NOT NULL,
+     TBL_NAME nvarchar(256) NOT NULL,
+     TXN_LIST text NULL
+ );
+ 
+ ALTER TABLE MV_CREATION_METADATA ADD CONSTRAINT MV_CREATION_METADATA_PK PRIMARY KEY (MV_CREATION_METADATA_ID);
+ CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME,DB_NAME);
+ 
+ 
+ CREATE TABLE MV_TABLES_USED
+ (
+     MV_CREATION_METADATA_ID bigint NOT NULL,
+     TBL_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(MV_CREATION_METADATA_ID) REFERENCES MV_CREATION_METADATA (MV_CREATION_METADATA_ID);
+ ALTER TABLE MV_TABLES_USED WITH CHECK ADD FOREIGN KEY(TBL_ID) REFERENCES TBLS (TBL_ID);
+ 
+ -- Table SDS for classes [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ CREATE TABLE SDS
+ (
+     SD_ID bigint NOT NULL,
+     CD_ID bigint NULL,
+     INPUT_FORMAT nvarchar(4000) NULL,
+     IS_COMPRESSED bit NOT NULL,
+     IS_STOREDASSUBDIRECTORIES bit NOT NULL,
+     LOCATION nvarchar(4000) NULL,
+     NUM_BUCKETS int NOT NULL,
+     OUTPUT_FORMAT nvarchar(4000) NULL,
+     SERDE_ID bigint NULL
+ );
+ 
+ ALTER TABLE SDS ADD CONSTRAINT SDS_PK PRIMARY KEY (SD_ID);
+ 
+ -- Table PARTITION_EVENTS for classes [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE TABLE PARTITION_EVENTS
+ (
+     PART_NAME_ID bigint NOT NULL,
+     CAT_NAME nvarchar(256) NULL,
+     DB_NAME nvarchar(128) NULL,
+     EVENT_TIME bigint NOT NULL,
+     EVENT_TYPE int NOT NULL,
+     PARTITION_NAME nvarchar(767) NULL,
+     TBL_NAME nvarchar(256) NULL
+ );
+ 
+ ALTER TABLE PARTITION_EVENTS ADD CONSTRAINT PARTITION_EVENTS_PK PRIMARY KEY (PART_NAME_ID);
+ 
+ -- Table SORT_COLS for join relationship
+ CREATE TABLE SORT_COLS
+ (
+     SD_ID bigint NOT NULL,
+     "COLUMN_NAME" nvarchar(767) NULL,
+     "ORDER" int NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table SKEWED_COL_NAMES for join relationship
+ CREATE TABLE SKEWED_COL_NAMES
+ (
+     SD_ID bigint NOT NULL,
+     SKEWED_COL_NAME nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table SKEWED_COL_VALUE_LOC_MAP for join relationship
+ CREATE TABLE SKEWED_COL_VALUE_LOC_MAP
+ (
+     SD_ID bigint NOT NULL,
+     STRING_LIST_ID_KID bigint NOT NULL,
+     LOCATION nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_PK PRIMARY KEY (SD_ID,STRING_LIST_ID_KID);
+ 
+ -- Table SKEWED_STRING_LIST_VALUES for join relationship
+ CREATE TABLE SKEWED_STRING_LIST_VALUES
+ (
+     STRING_LIST_ID bigint NOT NULL,
+     STRING_LIST_VALUE nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_PK PRIMARY KEY (STRING_LIST_ID,INTEGER_IDX);
+ 
+ -- Table PARTITION_KEY_VALS for join relationship
+ CREATE TABLE PARTITION_KEY_VALS
+ (
+     PART_ID bigint NOT NULL,
+     PART_KEY_VAL nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_PK PRIMARY KEY (PART_ID,INTEGER_IDX);
+ 
+ -- Table PARTITION_KEYS for join relationship
+ CREATE TABLE PARTITION_KEYS
+ (
+     TBL_ID bigint NOT NULL,
+     PKEY_COMMENT nvarchar(4000) NULL,
+     PKEY_NAME nvarchar(128) NOT NULL,
+     PKEY_TYPE nvarchar(767) NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEY_PK PRIMARY KEY (TBL_ID,PKEY_NAME);
+ 
+ -- Table SKEWED_VALUES for join relationship
+ CREATE TABLE SKEWED_VALUES
+ (
+     SD_ID_OID bigint NOT NULL,
+     STRING_LIST_ID_EID bigint NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_PK PRIMARY KEY (SD_ID_OID,INTEGER_IDX);
+ 
+ -- Table SD_PARAMS for join relationship
+ CREATE TABLE SD_PARAMS
+ (
+     SD_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE varchar(max) NULL
+ );
+ 
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_PK PRIMARY KEY (SD_ID,PARAM_KEY);
+ 
+ -- Table FUNC_RU for join relationship
+ CREATE TABLE FUNC_RU
+ (
+     FUNC_ID bigint NOT NULL,
+     RESOURCE_TYPE int NOT NULL,
+     RESOURCE_URI nvarchar(4000) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_PK PRIMARY KEY (FUNC_ID,INTEGER_IDX);
+ 
+ -- Table TYPE_FIELDS for join relationship
+ CREATE TABLE TYPE_FIELDS
+ (
+     TYPE_NAME bigint NOT NULL,
+     COMMENT nvarchar(256) NULL,
+     FIELD_NAME nvarchar(128) NOT NULL,
+     FIELD_TYPE nvarchar(767) NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_PK PRIMARY KEY (TYPE_NAME,FIELD_NAME);
+ 
+ -- Table BUCKETING_COLS for join relationship
+ CREATE TABLE BUCKETING_COLS
+ (
+     SD_ID bigint NOT NULL,
+     BUCKET_COL_NAME nvarchar(255) NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_PK PRIMARY KEY (SD_ID,INTEGER_IDX);
+ 
+ -- Table DATABASE_PARAMS for join relationship
+ CREATE TABLE DATABASE_PARAMS
+ (
+     DB_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(180) NOT NULL,
+     PARAM_VALUE nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_PK PRIMARY KEY (DB_ID,PARAM_KEY);
+ 
+ -- Table INDEX_PARAMS for join relationship
+ CREATE TABLE INDEX_PARAMS
+ (
+     INDEX_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_PK PRIMARY KEY (INDEX_ID,PARAM_KEY);
+ 
+ -- Table COLUMNS_V2 for join relationship
+ CREATE TABLE COLUMNS_V2
+ (
+     CD_ID bigint NOT NULL,
+     COMMENT nvarchar(256) NULL,
+     "COLUMN_NAME" nvarchar(767) NOT NULL,
+     TYPE_NAME varchar(max) NOT NULL,
+     INTEGER_IDX int NOT NULL
+ );
+ 
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_PK PRIMARY KEY (CD_ID,"COLUMN_NAME");
+ 
+ -- Table SERDE_PARAMS for join relationship
+ CREATE TABLE SERDE_PARAMS
+ (
+     SERDE_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE varchar(max) NULL
+ );
+ 
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_PK PRIMARY KEY (SERDE_ID,PARAM_KEY);
+ 
+ -- Table PARTITION_PARAMS for join relationship
+ CREATE TABLE PARTITION_PARAMS
+ (
+     PART_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE nvarchar(4000) NULL
+ );
+ 
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_PK PRIMARY KEY (PART_ID,PARAM_KEY);
+ 
+ -- Table TABLE_PARAMS for join relationship
+ CREATE TABLE TABLE_PARAMS
+ (
+     TBL_ID bigint NOT NULL,
+     PARAM_KEY nvarchar(256) NOT NULL,
+     PARAM_VALUE varchar(max) NULL
+ );
+ 
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_PK PRIMARY KEY (TBL_ID,PARAM_KEY);
+ 
+ CREATE TABLE NOTIFICATION_LOG
+ (
+     NL_ID bigint NOT NULL,
+     EVENT_ID bigint NOT NULL,
+     EVENT_TIME int NOT NULL,
+     EVENT_TYPE nvarchar(32) NOT NULL,
+     CAT_NAME nvarchar(128) NULL,
+     DB_NAME nvarchar(128) NULL,
+     TBL_NAME nvarchar(256) NULL,
+     MESSAGE_FORMAT nvarchar(16),
+     MESSAGE text NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_LOG ADD CONSTRAINT NOTIFICATION_LOG_PK PRIMARY KEY (NL_ID);
+ 
+ CREATE TABLE NOTIFICATION_SEQUENCE
+ (
+     NNI_ID bigint NOT NULL,
+     NEXT_EVENT_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE NOTIFICATION_SEQUENCE ADD CONSTRAINT NOTIFICATION_SEQUENCE_PK PRIMARY KEY (NNI_ID);
+ 
+ -- Tables to manage resource plans.
+ 
+ CREATE TABLE WM_RESOURCEPLAN
+ (
+     RP_ID bigint NOT NULL,
+     "NAME" nvarchar(128) NOT NULL,
+     QUERY_PARALLELISM int,
+     STATUS nvarchar(20) NOT NULL,
+     DEFAULT_POOL_ID bigint
+ );
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_PK PRIMARY KEY (RP_ID);
+ 
+ CREATE TABLE WM_POOL
+ (
+     POOL_ID bigint NOT NULL,
+     RP_ID bigint NOT NULL,
+     PATH nvarchar(1024) NOT NULL,
+     ALLOC_FRACTION float,
+     QUERY_PARALLELISM int,
+     SCHEDULING_POLICY nvarchar(1024)
+ );
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_PK PRIMARY KEY (POOL_ID);
+ 
+ CREATE TABLE WM_TRIGGER
+ (
+     TRIGGER_ID bigint NOT NULL,
+     RP_ID bigint NOT NULL,
+     "NAME" nvarchar(128) NOT NULL,
+     TRIGGER_EXPRESSION nvarchar(1024),
+     ACTION_EXPRESSION nvarchar(1024),
+     IS_IN_UNMANAGED bit NOT NULL DEFAULT 0
+ );
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_PK PRIMARY KEY (TRIGGER_ID);
+ 
+ CREATE TABLE WM_POOL_TO_TRIGGER
+ (
+     POOL_ID bigint NOT NULL,
+     TRIGGER_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_PK PRIMARY KEY (POOL_ID, TRIGGER_ID);
+ 
+ CREATE TABLE WM_MAPPING
+ (
+     MAPPING_ID bigint NOT NULL,
+     RP_ID bigint NOT NULL,
+     ENTITY_TYPE nvarchar(128) NOT NULL,
+     ENTITY_NAME nvarchar(128) NOT NULL,
+     POOL_ID bigint,
+     ORDERING int
+ );
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_PK PRIMARY KEY (MAPPING_ID);
+ 
+ CREATE TABLE CTLGS (
+       CTLG_ID bigint primary key,
+       "NAME" nvarchar(256),
+       "DESC" nvarchar(4000),
+       LOCATION_URI nvarchar(4000) not null
+ );
+ 
+ CREATE UNIQUE INDEX UNIQUE_CTLG ON CTLGS ("NAME");
+ 
+ -- Constraints for table MASTER_KEYS for class(es) [org.apache.hadoop.hive.metastore.model.MMasterKey]
+ 
+ -- Constraints for table IDXS for class(es) [org.apache.hadoop.hive.metastore.model.MIndex]
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK1 FOREIGN KEY (INDEX_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE IDXS ADD CONSTRAINT IDXS_FK3 FOREIGN KEY (ORIG_TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE UNIQUE INDEX UNIQUEINDEX ON IDXS (INDEX_NAME,ORIG_TBL_ID);
+ 
+ CREATE INDEX IDXS_N51 ON IDXS (SD_ID);
+ 
+ CREATE INDEX IDXS_N50 ON IDXS (ORIG_TBL_ID);
+ 
+ CREATE INDEX IDXS_N49 ON IDXS (INDEX_TBL_ID);
+ 
+ 
+ -- Constraints for table PART_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics]
+ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID);
+ 
+ 
+ -- Constraints for table PART_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionPrivilege]
+ ALTER TABLE PART_PRIVS ADD CONSTRAINT PART_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PARTPRIVILEGEINDEX ON PART_PRIVS (AUTHORIZER,PART_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX PART_PRIVS_N49 ON PART_PRIVS (PART_ID);
+ 
+ 
+ -- Constraints for table SKEWED_STRING_LIST for class(es) [org.apache.hadoop.hive.metastore.model.MStringList]
+ 
+ -- Constraints for table ROLES for class(es) [org.apache.hadoop.hive.metastore.model.MRole]
+ CREATE UNIQUE INDEX ROLEENTITYINDEX ON ROLES (ROLE_NAME);
+ 
+ 
+ -- Constraints for table PARTITIONS for class(es) [org.apache.hadoop.hive.metastore.model.MPartition]
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ ALTER TABLE PARTITIONS ADD CONSTRAINT PARTITIONS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX PARTITIONS_N49 ON PARTITIONS (SD_ID);
+ 
+ CREATE INDEX PARTITIONS_N50 ON PARTITIONS (TBL_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUEPARTITION ON PARTITIONS (PART_NAME,TBL_ID);
+ 
+ 
+ -- Constraints for table CDS for class(es) [org.apache.hadoop.hive.metastore.model.MColumnDescriptor]
+ 
+ -- Constraints for table VERSION for class(es) [org.apache.hadoop.hive.metastore.model.MVersionTable]
+ 
+ -- Constraints for table GLOBAL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MGlobalPrivilege]
+ CREATE UNIQUE INDEX GLOBALPRIVILEGEINDEX ON GLOBAL_PRIVS (AUTHORIZER,PRINCIPAL_NAME,PRINCIPAL_TYPE,USER_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table PART_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege]
+ ALTER TABLE PART_COL_PRIVS ADD CONSTRAINT PART_COL_PRIVS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PART_COL_PRIVS_N49 ON PART_COL_PRIVS (PART_ID);
+ 
+ CREATE INDEX PARTITIONCOLUMNPRIVILEGEINDEX ON PART_COL_PRIVS (AUTHORIZER,PART_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,PART_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table DB_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MDBPrivilege]
+ ALTER TABLE DB_PRIVS ADD CONSTRAINT DB_PRIVS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE UNIQUE INDEX DBPRIVILEGEINDEX ON DB_PRIVS (AUTHORIZER,DB_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,DB_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX DB_PRIVS_N49 ON DB_PRIVS (DB_ID);
+ 
+ 
+ -- Constraints for table TAB_COL_STATS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnStatistics]
+ ALTER TABLE TAB_COL_STATS ADD CONSTRAINT TAB_COL_STATS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TAB_COL_STATS_N49 ON TAB_COL_STATS (TBL_ID);
+ 
+ 
+ -- Constraints for table TYPES for class(es) [org.apache.hadoop.hive.metastore.model.MType]
+ CREATE UNIQUE INDEX UNIQUETYPE ON TYPES (TYPE_NAME);
+ 
+ 
+ -- Constraints for table TBL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTablePrivilege]
+ ALTER TABLE TBL_PRIVS ADD CONSTRAINT TBL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TBL_PRIVS_N49 ON TBL_PRIVS (TBL_ID);
+ 
+ CREATE INDEX TABLEPRIVILEGEINDEX ON TBL_PRIVS (AUTHORIZER,TBL_ID,PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table DBS for class(es) [org.apache.hadoop.hive.metastore.model.MDatabase]
+ CREATE UNIQUE INDEX UNIQUEDATABASE ON DBS ("NAME", "CTLG_NAME");
+ 
+ 
+ -- Constraints for table TBL_COL_PRIVS for class(es) [org.apache.hadoop.hive.metastore.model.MTableColumnPrivilege]
+ ALTER TABLE TBL_COL_PRIVS ADD CONSTRAINT TBL_COL_PRIVS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TABLECOLUMNPRIVILEGEINDEX ON TBL_COL_PRIVS (AUTHORIZER,TBL_ID,"COLUMN_NAME",PRINCIPAL_NAME,PRINCIPAL_TYPE,TBL_COL_PRIV,GRANTOR,GRANTOR_TYPE);
+ 
+ CREATE INDEX TBL_COL_PRIVS_N49 ON TBL_COL_PRIVS (TBL_ID);
+ 
+ 
+ -- Constraints for table DELEGATION_TOKENS for class(es) [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+ 
+ -- Constraints for table SERDES for class(es) [org.apache.hadoop.hive.metastore.model.MSerDeInfo]
+ 
+ -- Constraints for table FUNCS for class(es) [org.apache.hadoop.hive.metastore.model.MFunction]
+ ALTER TABLE FUNCS ADD CONSTRAINT FUNCS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE UNIQUE INDEX UNIQUEFUNCTION ON FUNCS (FUNC_NAME,DB_ID);
+ 
+ CREATE INDEX FUNCS_N49 ON FUNCS (DB_ID);
+ 
+ 
+ -- Constraints for table ROLE_MAP for class(es) [org.apache.hadoop.hive.metastore.model.MRoleMap]
+ ALTER TABLE ROLE_MAP ADD CONSTRAINT ROLE_MAP_FK1 FOREIGN KEY (ROLE_ID) REFERENCES ROLES (ROLE_ID) ;
+ 
+ CREATE INDEX ROLE_MAP_N49 ON ROLE_MAP (ROLE_ID);
+ 
+ CREATE UNIQUE INDEX USERROLEMAPINDEX ON ROLE_MAP (PRINCIPAL_NAME,ROLE_ID,GRANTOR,GRANTOR_TYPE);
+ 
+ 
+ -- Constraints for table TBLS for class(es) [org.apache.hadoop.hive.metastore.model.MTable]
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK2 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE TBLS ADD CONSTRAINT TBLS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE INDEX TBLS_N50 ON TBLS (SD_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUETABLE ON TBLS (TBL_NAME,DB_ID);
+ 
+ CREATE INDEX TBLS_N49 ON TBLS (DB_ID);
+ 
+ 
+ -- Constraints for table SDS for class(es) [org.apache.hadoop.hive.metastore.model.MStorageDescriptor]
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+ 
+ ALTER TABLE SDS ADD CONSTRAINT SDS_FK2 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+ 
+ CREATE INDEX SDS_N50 ON SDS (CD_ID);
+ 
+ CREATE INDEX SDS_N49 ON SDS (SERDE_ID);
+ 
+ 
+ -- Constraints for table PARTITION_EVENTS for class(es) [org.apache.hadoop.hive.metastore.model.MPartitionEvent]
+ CREATE INDEX PARTITIONEVENTINDEX ON PARTITION_EVENTS (PARTITION_NAME);
+ 
+ 
+ -- Constraints for table SORT_COLS
+ ALTER TABLE SORT_COLS ADD CONSTRAINT SORT_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX SORT_COLS_N49 ON SORT_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table SKEWED_COL_NAMES
+ ALTER TABLE SKEWED_COL_NAMES ADD CONSTRAINT SKEWED_COL_NAMES_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX SKEWED_COL_NAMES_N49 ON SKEWED_COL_NAMES (SD_ID);
+ 
+ 
+ -- Constraints for table SKEWED_COL_VALUE_LOC_MAP
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE SKEWED_COL_VALUE_LOC_MAP ADD CONSTRAINT SKEWED_COL_VALUE_LOC_MAP_FK2 FOREIGN KEY (STRING_LIST_ID_KID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+ 
+ CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N50 ON SKEWED_COL_VALUE_LOC_MAP (STRING_LIST_ID_KID);
+ 
+ CREATE INDEX SKEWED_COL_VALUE_LOC_MAP_N49 ON SKEWED_COL_VALUE_LOC_MAP (SD_ID);
+ 
+ 
+ -- Constraints for table SKEWED_STRING_LIST_VALUES
+ ALTER TABLE SKEWED_STRING_LIST_VALUES ADD CONSTRAINT SKEWED_STRING_LIST_VALUES_FK1 FOREIGN KEY (STRING_LIST_ID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+ 
+ CREATE INDEX SKEWED_STRING_LIST_VALUES_N49 ON SKEWED_STRING_LIST_VALUES (STRING_LIST_ID);
+ 
+ 
+ -- Constraints for table PARTITION_KEY_VALS
+ ALTER TABLE PARTITION_KEY_VALS ADD CONSTRAINT PARTITION_KEY_VALS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PARTITION_KEY_VALS_N49 ON PARTITION_KEY_VALS (PART_ID);
+ 
+ 
+ -- Constraints for table PARTITION_KEYS
+ ALTER TABLE PARTITION_KEYS ADD CONSTRAINT PARTITION_KEYS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX PARTITION_KEYS_N49 ON PARTITION_KEYS (TBL_ID);
+ 
+ 
+ -- Constraints for table SKEWED_VALUES
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK1 FOREIGN KEY (SD_ID_OID) REFERENCES SDS (SD_ID) ;
+ 
+ ALTER TABLE SKEWED_VALUES ADD CONSTRAINT SKEWED_VALUES_FK2 FOREIGN KEY (STRING_LIST_ID_EID) REFERENCES SKEWED_STRING_LIST (STRING_LIST_ID) ;
+ 
+ CREATE INDEX SKEWED_VALUES_N50 ON SKEWED_VALUES (SD_ID_OID);
+ 
+ CREATE INDEX SKEWED_VALUES_N49 ON SKEWED_VALUES (STRING_LIST_ID_EID);
+ 
+ 
+ -- Constraints for table SD_PARAMS
+ ALTER TABLE SD_PARAMS ADD CONSTRAINT SD_PARAMS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX SD_PARAMS_N49 ON SD_PARAMS (SD_ID);
+ 
+ 
+ -- Constraints for table FUNC_RU
+ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES FUNCS (FUNC_ID) ;
+ 
+ CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
+ 
+ 
+ -- Constraints for table TYPE_FIELDS
+ ALTER TABLE TYPE_FIELDS ADD CONSTRAINT TYPE_FIELDS_FK1 FOREIGN KEY (TYPE_NAME) REFERENCES TYPES (TYPES_ID) ;
+ 
+ CREATE INDEX TYPE_FIELDS_N49 ON TYPE_FIELDS (TYPE_NAME);
+ 
+ 
+ -- Constraints for table BUCKETING_COLS
+ ALTER TABLE BUCKETING_COLS ADD CONSTRAINT BUCKETING_COLS_FK1 FOREIGN KEY (SD_ID) REFERENCES SDS (SD_ID) ;
+ 
+ CREATE INDEX BUCKETING_COLS_N49 ON BUCKETING_COLS (SD_ID);
+ 
+ 
+ -- Constraints for table DATABASE_PARAMS
+ ALTER TABLE DATABASE_PARAMS ADD CONSTRAINT DATABASE_PARAMS_FK1 FOREIGN KEY (DB_ID) REFERENCES DBS (DB_ID) ;
+ 
+ CREATE INDEX DATABASE_PARAMS_N49 ON DATABASE_PARAMS (DB_ID);
+ 
+ 
+ -- Constraints for table INDEX_PARAMS
+ ALTER TABLE INDEX_PARAMS ADD CONSTRAINT INDEX_PARAMS_FK1 FOREIGN KEY (INDEX_ID) REFERENCES IDXS (INDEX_ID) ;
+ 
+ CREATE INDEX INDEX_PARAMS_N49 ON INDEX_PARAMS (INDEX_ID);
+ 
+ 
+ -- Constraints for table COLUMNS_V2
+ ALTER TABLE COLUMNS_V2 ADD CONSTRAINT COLUMNS_V2_FK1 FOREIGN KEY (CD_ID) REFERENCES CDS (CD_ID) ;
+ 
+ CREATE INDEX COLUMNS_V2_N49 ON COLUMNS_V2 (CD_ID);
+ 
+ 
+ -- Constraints for table SERDE_PARAMS
+ ALTER TABLE SERDE_PARAMS ADD CONSTRAINT SERDE_PARAMS_FK1 FOREIGN KEY (SERDE_ID) REFERENCES SERDES (SERDE_ID) ;
+ 
+ CREATE INDEX SERDE_PARAMS_N49 ON SERDE_PARAMS (SERDE_ID);
+ 
+ 
+ -- Constraints for table PARTITION_PARAMS
+ ALTER TABLE PARTITION_PARAMS ADD CONSTRAINT PARTITION_PARAMS_FK1 FOREIGN KEY (PART_ID) REFERENCES PARTITIONS (PART_ID) ;
+ 
+ CREATE INDEX PARTITION_PARAMS_N49 ON PARTITION_PARAMS (PART_ID);
+ 
+ 
+ -- Constraints for table TABLE_PARAMS
+ ALTER TABLE TABLE_PARAMS ADD CONSTRAINT TABLE_PARAMS_FK1 FOREIGN KEY (TBL_ID) REFERENCES TBLS (TBL_ID) ;
+ 
+ CREATE INDEX TABLE_PARAMS_N49 ON TABLE_PARAMS (TBL_ID);
+ 
+ -- Constraints for resource plan tables.
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_RESOURCEPLAN ON WM_RESOURCEPLAN ("NAME");
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_POOL ON WM_POOL (RP_ID, PATH);
+ 
+ ALTER TABLE WM_RESOURCEPLAN ADD CONSTRAINT WM_RESOURCEPLAN_FK1 FOREIGN KEY (DEFAULT_POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL ADD CONSTRAINT WM_POOL_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_TRIGGER ON WM_TRIGGER (RP_ID, "NAME");
+ 
+ ALTER TABLE WM_TRIGGER ADD CONSTRAINT WM_TRIGGER_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK1 FOREIGN KEY (POOL_ID) REFERENCES WM_POOL (POOL_ID);
+ 
+ ALTER TABLE WM_POOL_TO_TRIGGER ADD CONSTRAINT WM_POOL_TO_TRIGGER_FK2 FOREIGN KEY (TRIGGER_ID) REFERENCES WM_TRIGGER (TRIGGER_ID);
+ 
+ CREATE UNIQUE INDEX UNIQUE_WM_MAPPING ON WM_MAPPING (RP_ID, ENTITY_TYPE, ENTITY_NAME);
+ 
+ ALTER TABLE WM_MAPPING ADD CONSTRAINT WM_MAPPING_FK1 FOREIGN KEY (RP_ID) REFERENCES WM_RESOURCEPLAN (RP_ID);
+ 
+ ALTER TABLE DBS ADD CONSTRAINT "DBS_FK1" FOREIGN KEY ("CTLG_NAME") REFERENCES CTLGS ("NAME");
+ -- -----------------------------------------------------------------------------------------------------------------------------------------------
+ -- Transaction and Lock Tables
+ -- These tables are not part of the JDO package, so if you regenerate this file you must manually add this section back.
+ -- -----------------------------------------------------------------------------------------------------------------------------------------------
+ CREATE TABLE COMPACTION_QUEUE(
+ 	CQ_ID bigint NOT NULL,
+ 	CQ_DATABASE nvarchar(128) NOT NULL,
+ 	CQ_TABLE nvarchar(128) NOT NULL,
+ 	CQ_PARTITION nvarchar(767) NULL,
+ 	CQ_STATE char(1) NOT NULL,
+ 	CQ_TYPE char(1) NOT NULL,
+ 	CQ_TBLPROPERTIES nvarchar(2048) NULL,
+ 	CQ_WORKER_ID nvarchar(128) NULL,
+ 	CQ_START bigint NULL,
+ 	CQ_RUN_AS nvarchar(128) NULL,
+     CQ_HIGHEST_WRITE_ID bigint NULL,
+     CQ_META_INFO varbinary(2048) NULL,
+ 	CQ_HADOOP_JOB_ID nvarchar(128) NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	CQ_ID ASC
+ )
+ );
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+ 	CC_ID bigint NOT NULL,
+ 	CC_DATABASE nvarchar(128) NOT NULL,
+ 	CC_TABLE nvarchar(128) NOT NULL,
+ 	CC_PARTITION nvarchar(767) NULL,
+ 	CC_STATE char(1) NOT NULL,
+ 	CC_TYPE char(1) NOT NULL,
+ 	CC_TBLPROPERTIES nvarchar(2048) NULL,
+ 	CC_WORKER_ID nvarchar(128) NULL,
+ 	CC_START bigint NULL,
+ 	CC_END bigint NULL,
+ 	CC_RUN_AS nvarchar(128) NULL,
+     CC_HIGHEST_WRITE_ID bigint NULL,
+     CC_META_INFO varbinary(2048) NULL,
+ 	CC_HADOOP_JOB_ID nvarchar(128) NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	CC_ID ASC
+ )
+ );
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS(
+ 	CTC_TXNID bigint NOT NULL,
+ 	CTC_DATABASE nvarchar(128) NOT NULL,
+ 	CTC_TABLE nvarchar(128) NULL,
+ 	CTC_PARTITION nvarchar(767) NULL,
+     CTC_TIMESTAMP datetime2 DEFAULT CURRENT_TIMESTAMP NOT NULL,
+     CTC_WRITEID bigint
+ );
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION);
+ 
+ CREATE TABLE HIVE_LOCKS(
+ 	HL_LOCK_EXT_ID bigint NOT NULL,
+ 	HL_LOCK_INT_ID bigint NOT NULL,
+ 	HL_TXNID bigint NOT NULL,
+ 	HL_DB nvarchar(128) NOT NULL,
+ 	HL_TABLE nvarchar(128) NULL,
+ 	HL_PARTITION nvarchar(767) NULL,
+ 	HL_LOCK_STATE char(1) NOT NULL,
+ 	HL_LOCK_TYPE char(1) NOT NULL,
+ 	HL_LAST_HEARTBEAT bigint NOT NULL,
+ 	HL_ACQUIRED_AT bigint NULL,
+ 	HL_USER nvarchar(128) NOT NULL,
+ 	HL_HOST nvarchar(128) NOT NULL,
+     HL_HEARTBEAT_COUNT int NULL,
+     HL_AGENT_INFO nvarchar(128) NULL,
+     HL_BLOCKEDBY_EXT_ID bigint NULL,
+     HL_BLOCKEDBY_INT_ID bigint NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	HL_LOCK_EXT_ID ASC,
+ 	HL_LOCK_INT_ID ASC
+ )
+ );
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID(
+ 	NCQ_NEXT bigint NOT NULL
+ );
+ 
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE NEXT_LOCK_ID(
+ 	NL_NEXT bigint NOT NULL
+ );
+ 
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
+ CREATE TABLE NEXT_TXN_ID(
+ 	NTXN_NEXT bigint NOT NULL
+ );
+ 
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE TXNS(
+ 	TXN_ID bigint NOT NULL,
+ 	TXN_STATE char(1) NOT NULL,
+ 	TXN_STARTED bigint NOT NULL,
+ 	TXN_LAST_HEARTBEAT bigint NOT NULL,
+ 	TXN_USER nvarchar(128) NOT NULL,
+ 	TXN_HOST nvarchar(128) NOT NULL,
+     TXN_AGENT_INFO nvarchar(128) NULL,
+     TXN_META_INFO nvarchar(128) NULL,
+     TXN_HEARTBEAT_COUNT int NULL,
+     TXN_TYPE int NULL,
+ PRIMARY KEY CLUSTERED
+ (
+ 	TXN_ID ASC
+ )
+ );
+ 
+ CREATE TABLE TXN_COMPONENTS(
+ 	TC_TXNID bigint NOT NULL,
+ 	TC_DATABASE nvarchar(128) NOT NULL,
+ 	TC_TABLE nvarchar(128) NULL,
+ 	TC_PARTITION nvarchar(767) NULL,
+     TC_OPERATION_TYPE char(1) NOT NULL,
+     TC_WRITEID bigint
+ );
+ 
+ ALTER TABLE TXN_COMPONENTS  WITH CHECK ADD FOREIGN KEY(TC_TXNID) REFERENCES TXNS (TXN_ID);
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 nvarchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT nvarchar(255) NULL,
+   PRIMARY KEY CLUSTERED
+ (
+     MT_KEY1 ASC,
+     MT_KEY2 ASC
+ )
+ );
+ 
+ CREATE TABLE KEY_CONSTRAINTS
+ (
+   CHILD_CD_ID BIGINT,
+   CHILD_INTEGER_IDX INT,
+   CHILD_TBL_ID BIGINT,
+   PARENT_CD_ID BIGINT,
+   PARENT_INTEGER_IDX INT NOT NULL,
+   PARENT_TBL_ID BIGINT NOT NULL,
+   POSITION INT NOT NULL,
+   CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+   CONSTRAINT_TYPE SMALLINT NOT NULL,
+   UPDATE_RULE SMALLINT,
+   DELETE_RULE SMALLINT,
+   ENABLE_VALIDATE_RELY SMALLINT NOT NULL,
+   DEFAULT_VALUE VARCHAR(400)
+ ) ;
+ 
+ ALTER TABLE KEY_CONSTRAINTS ADD CONSTRAINT CONSTRAINTS_PK PRIMARY KEY (CONSTRAINT_NAME, POSITION);
+ 
+ CREATE INDEX CONSTRAINTS_PARENT_TBL_ID__INDEX ON KEY_CONSTRAINTS(PARENT_TBL_ID);
+ 
+ CREATE INDEX CONSTRAINTS_CONSTRAINT_TYPE_INDEX ON KEY_CONSTRAINTS(CONSTRAINT_TYPE);
+ 
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE nvarchar(128) NOT NULL,
+   WS_TABLE nvarchar(128) NOT NULL,
+   WS_PARTITION nvarchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ );
+ 
+ CREATE TABLE METASTORE_DB_PROPERTIES (
+   PROPERTY_KEY VARCHAR(255) NOT NULL,
+   PROPERTY_VALUE VARCHAR(1000) NOT NULL,
+   DESCRIPTION VARCHAR(1000)
+ );
+ 
+ ALTER TABLE METASTORE_DB_PROPERTIES ADD CONSTRAINT PROPERTY_KEY_PK PRIMARY KEY (PROPERTY_KEY);
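+ 
+ -- Illustrative example with hypothetical values, not part of the schema:
+ --   INSERT INTO METASTORE_DB_PROPERTIES (PROPERTY_KEY, PROPERTY_VALUE, DESCRIPTION)
+ --   VALUES ('example.key', 'example-value', 'hypothetical property for illustration');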
+ 
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE nvarchar(128) NOT NULL,
+   T2W_TABLE nvarchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE nvarchar(128) NOT NULL,
+   NWI_TABLE nvarchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+ PRIMARY KEY CLUSTERED
+ (
+     MHL_TXNID ASC
+ )
+ );
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE "I_SCHEMA" (
+   "SCHEMA_ID" bigint primary key,
+   "SCHEMA_TYPE" int not null,
+   "NAME" nvarchar(256) unique,
+   "DB_ID" bigint references "DBS" ("DB_ID"),
+   "COMPATIBILITY" int not null,
+   "VALIDATION_LEVEL" int not null,
+   "CAN_EVOLVE" bit not null,
+   "SCHEMA_GROUP" nvarchar(256),
+   "DESCRIPTION" nvarchar(4000),
+ );
+ 
+ CREATE TABLE "SCHEMA_VERSION" (
+   "SCHEMA_VERSION_ID" bigint primary key,
+   "SCHEMA_ID" bigint references "I_SCHEMA" ("SCHEMA_ID"),
+   "VERSION" int not null,
+   "CREATED_AT" bigint not null,
+   "CD_ID" bigint references "CDS" ("CD_ID"),
+   "STATE" int not null,
+   "DESCRIPTION" nvarchar(4000),
+   "SCHEMA_TEXT" varchar(max),
+   "FINGERPRINT" nvarchar(256),
+   "SCHEMA_VERSION_NAME" nvarchar(256),
+   "SERDE_ID" bigint references "SERDES" ("SERDE_ID"),
+   unique ("SCHEMA_ID", "VERSION")
+ );
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY nvarchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL
+ );
+ 
+ ALTER TABLE REPL_TXN_MAP ADD CONSTRAINT REPL_TXN_MAP_PK PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID);
+ 
+ -- Table SEQUENCE_TABLE is an internal table required by DataNucleus.
+ -- NOTE: Some versions of SchemaTool do not automatically generate this table.
+ -- See http://www.datanucleus.org/servlet/jira/browse/NUCRDBMS-416
+ CREATE TABLE SEQUENCE_TABLE
+ (
+    SEQUENCE_NAME nvarchar(256) NOT NULL,
+    NEXT_VAL bigint NOT NULL
+ );
+ 
+ CREATE UNIQUE INDEX PART_TABLE_PK ON SEQUENCE_TABLE (SEQUENCE_NAME);
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
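+ 
+ -- Illustrative sketch (an assumption about DataNucleus behavior, not part of the
+ -- schema): ids are reserved by reading and advancing NEXT_VAL for a sequence name,
+ -- roughly:
+ --   SELECT NEXT_VAL FROM SEQUENCE_TABLE WHERE SEQUENCE_NAME = ?;
+ --   UPDATE SEQUENCE_TABLE SET NEXT_VAL = NEXT_VAL + 1 WHERE SEQUENCE_NAME = ?;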
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID bigint primary key,
+   CREATE_TIME bigint NOT NULL,
+   WEIGHT bigint NOT NULL,
+   PAYLOAD varbinary(max)
+ );
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID bigint NOT NULL,
+   WNL_TXNID bigint NOT NULL,
+   WNL_WRITEID bigint NOT NULL,
+   WNL_DATABASE nvarchar(128) NOT NULL,
+   WNL_TABLE nvarchar(128) NOT NULL,
+   WNL_PARTITION nvarchar(1024) NOT NULL,
+   WNL_TABLE_OBJ text NOT NULL,
+   WNL_PARTITION_OBJ text,
+   WNL_FILES text,
+   WNL_EVENT_TIME int NOT NULL
+ );
+ 
+ ALTER TABLE TXN_WRITE_NOTIFICATION_LOG ADD CONSTRAINT TXN_WRITE_NOTIFICATION_LOG_PK PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION);
+ 
+ INSERT INTO SEQUENCE_TABLE (SEQUENCE_NAME, NEXT_VAL) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
index 0000000,27b7026..acc9361
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
+ 
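+ -- Illustrative pre-check, not part of the script: the starting version can be
+ -- confirmed with:
+ --   SELECT SCHEMA_VERSION FROM VERSION WHERE VER_ID = 1;  -- expect '3.1.0'
+ 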
++-- HIVE-19416
++ALTER TABLE TBLS ADD WRITE_ID bigint NULL;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint NULL;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS MESSAGE;
+ 


[33/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
new file mode 100644
index 0000000..8553140
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Date.java
@@ -0,0 +1,387 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Date implements org.apache.thrift.TBase<Date, Date._Fields>, java.io.Serializable, Cloneable, Comparable<Date> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Date");
+
+  private static final org.apache.thrift.protocol.TField DAYS_SINCE_EPOCH_FIELD_DESC = new org.apache.thrift.protocol.TField("daysSinceEpoch", org.apache.thrift.protocol.TType.I64, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DateStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DateTupleSchemeFactory());
+  }
+
+  private long daysSinceEpoch; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DAYS_SINCE_EPOCH((short)1, "daysSinceEpoch");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DAYS_SINCE_EPOCH
+          return DAYS_SINCE_EPOCH;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __DAYSSINCEEPOCH_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DAYS_SINCE_EPOCH, new org.apache.thrift.meta_data.FieldMetaData("daysSinceEpoch", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Date.class, metaDataMap);
+  }
+
+  public Date() {
+  }
+
+  public Date(
+    long daysSinceEpoch)
+  {
+    this();
+    this.daysSinceEpoch = daysSinceEpoch;
+    setDaysSinceEpochIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public Date(Date other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.daysSinceEpoch = other.daysSinceEpoch;
+  }
+
+  public Date deepCopy() {
+    return new Date(this);
+  }
+
+  @Override
+  public void clear() {
+    setDaysSinceEpochIsSet(false);
+    this.daysSinceEpoch = 0;
+  }
+
+  public long getDaysSinceEpoch() {
+    return this.daysSinceEpoch;
+  }
+
+  public void setDaysSinceEpoch(long daysSinceEpoch) {
+    this.daysSinceEpoch = daysSinceEpoch;
+    setDaysSinceEpochIsSet(true);
+  }
+
+  public void unsetDaysSinceEpoch() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DAYSSINCEEPOCH_ISSET_ID);
+  }
+
+  /** Returns true if field daysSinceEpoch is set (has been assigned a value) and false otherwise */
+  public boolean isSetDaysSinceEpoch() {
+    return EncodingUtils.testBit(__isset_bitfield, __DAYSSINCEEPOCH_ISSET_ID);
+  }
+
+  public void setDaysSinceEpochIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DAYSSINCEEPOCH_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DAYS_SINCE_EPOCH:
+      if (value == null) {
+        unsetDaysSinceEpoch();
+      } else {
+        setDaysSinceEpoch((Long)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DAYS_SINCE_EPOCH:
+      return getDaysSinceEpoch();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DAYS_SINCE_EPOCH:
+      return isSetDaysSinceEpoch();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof Date)
+      return this.equals((Date)that);
+    return false;
+  }
+
+  public boolean equals(Date that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_daysSinceEpoch = true;
+    boolean that_present_daysSinceEpoch = true;
+    if (this_present_daysSinceEpoch || that_present_daysSinceEpoch) {
+      if (!(this_present_daysSinceEpoch && that_present_daysSinceEpoch))
+        return false;
+      if (this.daysSinceEpoch != that.daysSinceEpoch)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_daysSinceEpoch = true;
+    list.add(present_daysSinceEpoch);
+    if (present_daysSinceEpoch)
+      list.add(daysSinceEpoch);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(Date other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDaysSinceEpoch()).compareTo(other.isSetDaysSinceEpoch());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDaysSinceEpoch()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.daysSinceEpoch, other.daysSinceEpoch);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Date(");
+    boolean first = true;
+
+    sb.append("daysSinceEpoch:");
+    sb.append(this.daysSinceEpoch);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDaysSinceEpoch()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'daysSinceEpoch' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization does not call the default constructor, so the isset bitfield must be reset explicitly here.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DateStandardSchemeFactory implements SchemeFactory {
+    public DateStandardScheme getScheme() {
+      return new DateStandardScheme();
+    }
+  }
+
+  private static class DateStandardScheme extends StandardScheme<Date> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Date struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DAYS_SINCE_EPOCH
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.daysSinceEpoch = iprot.readI64();
+              struct.setDaysSinceEpochIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Date struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(DAYS_SINCE_EPOCH_FIELD_DESC);
+      oprot.writeI64(struct.daysSinceEpoch);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DateTupleSchemeFactory implements SchemeFactory {
+    public DateTupleScheme getScheme() {
+      return new DateTupleScheme();
+    }
+  }
+
+  private static class DateTupleScheme extends TupleScheme<Date> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, Date struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.daysSinceEpoch);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, Date struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.daysSinceEpoch = iprot.readI64();
+      struct.setDaysSinceEpochIsSet(true);
+    }
+  }
+
+}
+
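
As a usage note: the generated read/write methods above dispatch through the scheme map, so a plain protocol such as TCompactProtocol gets the standard scheme while TTupleProtocol gets the tuple scheme. A minimal round-trip sketch using the stock libthrift TSerializer/TDeserializer helpers (the day count is an arbitrary sample value):

    import org.apache.hadoop.hive.metastore.api.Date;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class DateRoundTrip {
      public static void main(String[] args) throws Exception {
        // Arbitrary sample: days since the Unix epoch.
        Date original = new Date(17709L);

        // TCompactProtocol reports StandardScheme, so DateStandardScheme handles I/O.
        byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(original);
        Date copy = new Date();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);

        copy.validate();  // passes: the required daysSinceEpoch field was read
        System.out.println(copy.equals(original));  // true
      }
    }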

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
new file mode 100644
index 0000000..78100f9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DateColumnStatsData.java
@@ -0,0 +1,823 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DateColumnStatsData implements org.apache.thrift.TBase<DateColumnStatsData, DateColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<DateColumnStatsData> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DateColumnStatsData");
+
+  private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("highValue", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+  private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField("numDVs", org.apache.thrift.protocol.TType.I64, (short)4);
+  private static final org.apache.thrift.protocol.TField BIT_VECTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("bitVectors", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DateColumnStatsDataStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DateColumnStatsDataTupleSchemeFactory());
+  }
+
+  private Date lowValue; // optional
+  private Date highValue; // optional
+  private long numNulls; // required
+  private long numDVs; // required
+  private ByteBuffer bitVectors; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    LOW_VALUE((short)1, "lowValue"),
+    HIGH_VALUE((short)2, "highValue"),
+    NUM_NULLS((short)3, "numNulls"),
+    NUM_DVS((short)4, "numDVs"),
+    BIT_VECTORS((short)5, "bitVectors");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // LOW_VALUE
+          return LOW_VALUE;
+        case 2: // HIGH_VALUE
+          return HIGH_VALUE;
+        case 3: // NUM_NULLS
+          return NUM_NULLS;
+        case 4: // NUM_DVS
+          return NUM_DVS;
+        case 5: // BIT_VECTORS
+          return BIT_VECTORS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __NUMNULLS_ISSET_ID = 0;
+  private static final int __NUMDVS_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.LOW_VALUE,_Fields.HIGH_VALUE,_Fields.BIT_VECTORS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.LOW_VALUE, new org.apache.thrift.meta_data.FieldMetaData("lowValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Date.class)));
+    tmpMap.put(_Fields.HIGH_VALUE, new org.apache.thrift.meta_data.FieldMetaData("highValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Date.class)));
+    tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.NUM_DVS, new org.apache.thrift.meta_data.FieldMetaData("numDVs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.BIT_VECTORS, new org.apache.thrift.meta_data.FieldMetaData("bitVectors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DateColumnStatsData.class, metaDataMap);
+  }
+
+  public DateColumnStatsData() {
+  }
+
+  public DateColumnStatsData(
+    long numNulls,
+    long numDVs)
+  {
+    this();
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+    this.numDVs = numDVs;
+    setNumDVsIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DateColumnStatsData(DateColumnStatsData other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetLowValue()) {
+      this.lowValue = new Date(other.lowValue);
+    }
+    if (other.isSetHighValue()) {
+      this.highValue = new Date(other.highValue);
+    }
+    this.numNulls = other.numNulls;
+    this.numDVs = other.numDVs;
+    if (other.isSetBitVectors()) {
+      this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(other.bitVectors);
+    }
+  }
+
+  public DateColumnStatsData deepCopy() {
+    return new DateColumnStatsData(this);
+  }
+
+  @Override
+  public void clear() {
+    this.lowValue = null;
+    this.highValue = null;
+    setNumNullsIsSet(false);
+    this.numNulls = 0;
+    setNumDVsIsSet(false);
+    this.numDVs = 0;
+    this.bitVectors = null;
+  }
+
+  public Date getLowValue() {
+    return this.lowValue;
+  }
+
+  public void setLowValue(Date lowValue) {
+    this.lowValue = lowValue;
+  }
+
+  public void unsetLowValue() {
+    this.lowValue = null;
+  }
+
+  /** Returns true if field lowValue is set (has been assigned a value) and false otherwise */
+  public boolean isSetLowValue() {
+    return this.lowValue != null;
+  }
+
+  public void setLowValueIsSet(boolean value) {
+    if (!value) {
+      this.lowValue = null;
+    }
+  }
+
+  public Date getHighValue() {
+    return this.highValue;
+  }
+
+  public void setHighValue(Date highValue) {
+    this.highValue = highValue;
+  }
+
+  public void unsetHighValue() {
+    this.highValue = null;
+  }
+
+  /** Returns true if field highValue is set (has been assigned a value) and false otherwise */
+  public boolean isSetHighValue() {
+    return this.highValue != null;
+  }
+
+  public void setHighValueIsSet(boolean value) {
+    if (!value) {
+      this.highValue = null;
+    }
+  }
+
+  public long getNumNulls() {
+    return this.numNulls;
+  }
+
+  public void setNumNulls(long numNulls) {
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+  }
+
+  public void unsetNumNulls() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumNulls() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  public void setNumNullsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value);
+  }
+
+  public long getNumDVs() {
+    return this.numDVs;
+  }
+
+  public void setNumDVs(long numDVs) {
+    this.numDVs = numDVs;
+    setNumDVsIsSet(true);
+  }
+
+  public void unsetNumDVs() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMDVS_ISSET_ID);
+  }
+
+  /** Returns true if field numDVs is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumDVs() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID);
+  }
+
+  public void setNumDVsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMDVS_ISSET_ID, value);
+  }
+
+  public byte[] getBitVectors() {
+    setBitVectors(org.apache.thrift.TBaseHelper.rightSize(bitVectors));
+    return bitVectors == null ? null : bitVectors.array();
+  }
+
+  public ByteBuffer bufferForBitVectors() {
+    return org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void setBitVectors(byte[] bitVectors) {
+    this.bitVectors = bitVectors == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(bitVectors, bitVectors.length));
+  }
+
+  public void setBitVectors(ByteBuffer bitVectors) {
+    this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void unsetBitVectors() {
+    this.bitVectors = null;
+  }
+
+  /** Returns true if field bitVectors is set (has been assigned a value) and false otherwise */
+  public boolean isSetBitVectors() {
+    return this.bitVectors != null;
+  }
+
+  public void setBitVectorsIsSet(boolean value) {
+    if (!value) {
+      this.bitVectors = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case LOW_VALUE:
+      if (value == null) {
+        unsetLowValue();
+      } else {
+        setLowValue((Date)value);
+      }
+      break;
+
+    case HIGH_VALUE:
+      if (value == null) {
+        unsetHighValue();
+      } else {
+        setHighValue((Date)value);
+      }
+      break;
+
+    case NUM_NULLS:
+      if (value == null) {
+        unsetNumNulls();
+      } else {
+        setNumNulls((Long)value);
+      }
+      break;
+
+    case NUM_DVS:
+      if (value == null) {
+        unsetNumDVs();
+      } else {
+        setNumDVs((Long)value);
+      }
+      break;
+
+    case BIT_VECTORS:
+      if (value == null) {
+        unsetBitVectors();
+      } else {
+        setBitVectors((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case LOW_VALUE:
+      return getLowValue();
+
+    case HIGH_VALUE:
+      return getHighValue();
+
+    case NUM_NULLS:
+      return getNumNulls();
+
+    case NUM_DVS:
+      return getNumDVs();
+
+    case BIT_VECTORS:
+      return getBitVectors();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case LOW_VALUE:
+      return isSetLowValue();
+    case HIGH_VALUE:
+      return isSetHighValue();
+    case NUM_NULLS:
+      return isSetNumNulls();
+    case NUM_DVS:
+      return isSetNumDVs();
+    case BIT_VECTORS:
+      return isSetBitVectors();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DateColumnStatsData)
+      return this.equals((DateColumnStatsData)that);
+    return false;
+  }
+
+  public boolean equals(DateColumnStatsData that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_lowValue = true && this.isSetLowValue();
+    boolean that_present_lowValue = true && that.isSetLowValue();
+    if (this_present_lowValue || that_present_lowValue) {
+      if (!(this_present_lowValue && that_present_lowValue))
+        return false;
+      if (!this.lowValue.equals(that.lowValue))
+        return false;
+    }
+
+    boolean this_present_highValue = true && this.isSetHighValue();
+    boolean that_present_highValue = true && that.isSetHighValue();
+    if (this_present_highValue || that_present_highValue) {
+      if (!(this_present_highValue && that_present_highValue))
+        return false;
+      if (!this.highValue.equals(that.highValue))
+        return false;
+    }
+
+    boolean this_present_numNulls = true;
+    boolean that_present_numNulls = true;
+    if (this_present_numNulls || that_present_numNulls) {
+      if (!(this_present_numNulls && that_present_numNulls))
+        return false;
+      if (this.numNulls != that.numNulls)
+        return false;
+    }
+
+    boolean this_present_numDVs = true;
+    boolean that_present_numDVs = true;
+    if (this_present_numDVs || that_present_numDVs) {
+      if (!(this_present_numDVs && that_present_numDVs))
+        return false;
+      if (this.numDVs != that.numDVs)
+        return false;
+    }
+
+    boolean this_present_bitVectors = true && this.isSetBitVectors();
+    boolean that_present_bitVectors = true && that.isSetBitVectors();
+    if (this_present_bitVectors || that_present_bitVectors) {
+      if (!(this_present_bitVectors && that_present_bitVectors))
+        return false;
+      if (!this.bitVectors.equals(that.bitVectors))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_lowValue = true && (isSetLowValue());
+    list.add(present_lowValue);
+    if (present_lowValue)
+      list.add(lowValue);
+
+    boolean present_highValue = true && (isSetHighValue());
+    list.add(present_highValue);
+    if (present_highValue)
+      list.add(highValue);
+
+    boolean present_numNulls = true;
+    list.add(present_numNulls);
+    if (present_numNulls)
+      list.add(numNulls);
+
+    boolean present_numDVs = true;
+    list.add(present_numDVs);
+    if (present_numDVs)
+      list.add(numDVs);
+
+    boolean present_bitVectors = true && (isSetBitVectors());
+    list.add(present_bitVectors);
+    if (present_bitVectors)
+      list.add(bitVectors);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DateColumnStatsData other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetLowValue()).compareTo(other.isSetLowValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLowValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lowValue, other.lowValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetHighValue()).compareTo(other.isSetHighValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHighValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highValue, other.highValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(other.isSetNumNulls());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumNulls()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, other.numNulls);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo(other.isSetNumDVs());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumDVs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numDVs, other.numDVs);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetBitVectors()).compareTo(other.isSetBitVectors());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetBitVectors()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bitVectors, other.bitVectors);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DateColumnStatsData(");
+    boolean first = true;
+
+    if (isSetLowValue()) {
+      sb.append("lowValue:");
+      if (this.lowValue == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.lowValue);
+      }
+      first = false;
+    }
+    if (isSetHighValue()) {
+      if (!first) sb.append(", ");
+      sb.append("highValue:");
+      if (this.highValue == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.highValue);
+      }
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("numNulls:");
+    sb.append(this.numNulls);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("numDVs:");
+    sb.append(this.numDVs);
+    first = false;
+    if (isSetBitVectors()) {
+      if (!first) sb.append(", ");
+      sb.append("bitVectors:");
+      if (this.bitVectors == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.bitVectors, sb);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetNumNulls()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNumDVs()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numDVs' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (lowValue != null) {
+      lowValue.validate();
+    }
+    if (highValue != null) {
+      highValue.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization does not call the default constructor, so the isset bitfield must be reset explicitly here.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DateColumnStatsDataStandardSchemeFactory implements SchemeFactory {
+    public DateColumnStatsDataStandardScheme getScheme() {
+      return new DateColumnStatsDataStandardScheme();
+    }
+  }
+
+  private static class DateColumnStatsDataStandardScheme extends StandardScheme<DateColumnStatsData> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DateColumnStatsData struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // LOW_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.lowValue = new Date();
+              struct.lowValue.read(iprot);
+              struct.setLowValueIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // HIGH_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.highValue = new Date();
+              struct.highValue.read(iprot);
+              struct.setHighValueIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // NUM_NULLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numNulls = iprot.readI64();
+              struct.setNumNullsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // NUM_DVS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numDVs = iprot.readI64();
+              struct.setNumDVsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // BIT_VECTORS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.bitVectors = iprot.readBinary();
+              struct.setBitVectorsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DateColumnStatsData struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.lowValue != null) {
+        if (struct.isSetLowValue()) {
+          oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC);
+          struct.lowValue.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.highValue != null) {
+        if (struct.isSetHighValue()) {
+          oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC);
+          struct.highValue.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC);
+      oprot.writeI64(struct.numNulls);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(NUM_DVS_FIELD_DESC);
+      oprot.writeI64(struct.numDVs);
+      oprot.writeFieldEnd();
+      if (struct.bitVectors != null) {
+        if (struct.isSetBitVectors()) {
+          oprot.writeFieldBegin(BIT_VECTORS_FIELD_DESC);
+          oprot.writeBinary(struct.bitVectors);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DateColumnStatsDataTupleSchemeFactory implements SchemeFactory {
+    public DateColumnStatsDataTupleScheme getScheme() {
+      return new DateColumnStatsDataTupleScheme();
+    }
+  }
+
+  private static class DateColumnStatsDataTupleScheme extends TupleScheme<DateColumnStatsData> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DateColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.numNulls);
+      oprot.writeI64(struct.numDVs);
+      BitSet optionals = new BitSet();
+      if (struct.isSetLowValue()) {
+        optionals.set(0);
+      }
+      if (struct.isSetHighValue()) {
+        optionals.set(1);
+      }
+      if (struct.isSetBitVectors()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetLowValue()) {
+        struct.lowValue.write(oprot);
+      }
+      if (struct.isSetHighValue()) {
+        struct.highValue.write(oprot);
+      }
+      if (struct.isSetBitVectors()) {
+        oprot.writeBinary(struct.bitVectors);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DateColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.numNulls = iprot.readI64();
+      struct.setNumNullsIsSet(true);
+      struct.numDVs = iprot.readI64();
+      struct.setNumDVsIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.lowValue = new Date();
+        struct.lowValue.read(iprot);
+        struct.setLowValueIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.highValue = new Date();
+        struct.highValue.read(iprot);
+        struct.setHighValueIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.bitVectors = iprot.readBinary();
+        struct.setBitVectorsIsSet(true);
+      }
+    }
+  }
+
+}
+
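
A short sketch of how the required/optional split in this struct behaves at the API level; the statistics values are made up for illustration:

    import org.apache.hadoop.hive.metastore.api.Date;
    import org.apache.hadoop.hive.metastore.api.DateColumnStatsData;

    public class DateStatsExample {
      public static void main(String[] args) throws Exception {
        // Required fields (numNulls, numDVs) come through the constructor.
        DateColumnStatsData stats = new DateColumnStatsData(12L, 340L);

        // Optional bounds stay unset until assigned; the standard scheme only
        // writes them when isSetLowValue()/isSetHighValue() return true.
        System.out.println(stats.isSetLowValue());  // false
        stats.setLowValue(new Date(10957L));        // sample: 2000-01-01
        stats.setHighValue(new Date(17709L));       // sample: 2018-06-27

        stats.validate();  // would throw TProtocolException if numNulls/numDVs were unset
        System.out.println(stats);
      }
    }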

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
new file mode 100644
index 0000000..361d58a
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Decimal.java
@@ -0,0 +1,497 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Decimal implements org.apache.thrift.TBase<Decimal, Decimal._Fields>, java.io.Serializable, Cloneable, Comparable<Decimal> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Decimal");
+
+  private static final org.apache.thrift.protocol.TField SCALE_FIELD_DESC = new org.apache.thrift.protocol.TField("scale", org.apache.thrift.protocol.TType.I16, (short)3);
+  private static final org.apache.thrift.protocol.TField UNSCALED_FIELD_DESC = new org.apache.thrift.protocol.TField("unscaled", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DecimalStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DecimalTupleSchemeFactory());
+  }
+
+  private short scale; // required
+  private ByteBuffer unscaled; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    SCALE((short)3, "scale"),
+    UNSCALED((short)1, "unscaled");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 3: // SCALE
+          return SCALE;
+        case 1: // UNSCALED
+          return UNSCALED;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __SCALE_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.SCALE, new org.apache.thrift.meta_data.FieldMetaData("scale", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16)));
+    tmpMap.put(_Fields.UNSCALED, new org.apache.thrift.meta_data.FieldMetaData("unscaled", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Decimal.class, metaDataMap);
+  }
+
+  public Decimal() {
+  }
+
+  public Decimal(
+    short scale,
+    ByteBuffer unscaled)
+  {
+    this();
+    this.scale = scale;
+    setScaleIsSet(true);
+    this.unscaled = org.apache.thrift.TBaseHelper.copyBinary(unscaled);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public Decimal(Decimal other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.scale = other.scale;
+    if (other.isSetUnscaled()) {
+      this.unscaled = org.apache.thrift.TBaseHelper.copyBinary(other.unscaled);
+    }
+  }
+
+  public Decimal deepCopy() {
+    return new Decimal(this);
+  }
+
+  @Override
+  public void clear() {
+    setScaleIsSet(false);
+    this.scale = 0;
+    this.unscaled = null;
+  }
+
+  public short getScale() {
+    return this.scale;
+  }
+
+  public void setScale(short scale) {
+    this.scale = scale;
+    setScaleIsSet(true);
+  }
+
+  public void unsetScale() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SCALE_ISSET_ID);
+  }
+
+  /** Returns true if field scale is set (has been assigned a value) and false otherwise */
+  public boolean isSetScale() {
+    return EncodingUtils.testBit(__isset_bitfield, __SCALE_ISSET_ID);
+  }
+
+  public void setScaleIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SCALE_ISSET_ID, value);
+  }
+
+  public byte[] getUnscaled() {
+    setUnscaled(org.apache.thrift.TBaseHelper.rightSize(unscaled));
+    return unscaled == null ? null : unscaled.array();
+  }
+
+  public ByteBuffer bufferForUnscaled() {
+    return org.apache.thrift.TBaseHelper.copyBinary(unscaled);
+  }
+
+  public void setUnscaled(byte[] unscaled) {
+    this.unscaled = unscaled == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(unscaled, unscaled.length));
+  }
+
+  public void setUnscaled(ByteBuffer unscaled) {
+    this.unscaled = org.apache.thrift.TBaseHelper.copyBinary(unscaled);
+  }
+
+  public void unsetUnscaled() {
+    this.unscaled = null;
+  }
+
+  /** Returns true if field unscaled is set (has been assigned a value) and false otherwise */
+  public boolean isSetUnscaled() {
+    return this.unscaled != null;
+  }
+
+  public void setUnscaledIsSet(boolean value) {
+    if (!value) {
+      this.unscaled = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case SCALE:
+      if (value == null) {
+        unsetScale();
+      } else {
+        setScale((Short)value);
+      }
+      break;
+
+    case UNSCALED:
+      if (value == null) {
+        unsetUnscaled();
+      } else {
+        setUnscaled((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case SCALE:
+      return getScale();
+
+    case UNSCALED:
+      return getUnscaled();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case SCALE:
+      return isSetScale();
+    case UNSCALED:
+      return isSetUnscaled();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof Decimal)
+      return this.equals((Decimal)that);
+    return false;
+  }
+
+  public boolean equals(Decimal that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_scale = true;
+    boolean that_present_scale = true;
+    if (this_present_scale || that_present_scale) {
+      if (!(this_present_scale && that_present_scale))
+        return false;
+      if (this.scale != that.scale)
+        return false;
+    }
+
+    boolean this_present_unscaled = true && this.isSetUnscaled();
+    boolean that_present_unscaled = true && that.isSetUnscaled();
+    if (this_present_unscaled || that_present_unscaled) {
+      if (!(this_present_unscaled && that_present_unscaled))
+        return false;
+      if (!this.unscaled.equals(that.unscaled))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_scale = true;
+    list.add(present_scale);
+    if (present_scale)
+      list.add(scale);
+
+    boolean present_unscaled = true && (isSetUnscaled());
+    list.add(present_unscaled);
+    if (present_unscaled)
+      list.add(unscaled);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(Decimal other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetScale()).compareTo(other.isSetScale());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetScale()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.scale, other.scale);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetUnscaled()).compareTo(other.isSetUnscaled());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetUnscaled()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.unscaled, other.unscaled);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Decimal(");
+    boolean first = true;
+
+    sb.append("scale:");
+    sb.append(this.scale);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("unscaled:");
+    if (this.unscaled == null) {
+      sb.append("null");
+    } else {
+      org.apache.thrift.TBaseHelper.toString(this.unscaled, sb);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetScale()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'scale' is unset! Struct:" + toString());
+    }
+
+    if (!isSetUnscaled()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'unscaled' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization does not call the default constructor, so the isset bitfield must be reset explicitly here.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DecimalStandardSchemeFactory implements SchemeFactory {
+    public DecimalStandardScheme getScheme() {
+      return new DecimalStandardScheme();
+    }
+  }
+
+  private static class DecimalStandardScheme extends StandardScheme<Decimal> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Decimal struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 3: // SCALE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I16) {
+              struct.scale = iprot.readI16();
+              struct.setScaleIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 1: // UNSCALED
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.unscaled = iprot.readBinary();
+              struct.setUnscaledIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Decimal struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.unscaled != null) {
+        oprot.writeFieldBegin(UNSCALED_FIELD_DESC);
+        oprot.writeBinary(struct.unscaled);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(SCALE_FIELD_DESC);
+      oprot.writeI16(struct.scale);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DecimalTupleSchemeFactory implements SchemeFactory {
+    public DecimalTupleScheme getScheme() {
+      return new DecimalTupleScheme();
+    }
+  }
+
+  private static class DecimalTupleScheme extends TupleScheme<Decimal> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, Decimal struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI16(struct.scale);
+      oprot.writeBinary(struct.unscaled);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, Decimal struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.scale = iprot.readI16();
+      struct.setScaleIsSet(true);
+      struct.unscaled = iprot.readBinary();
+      struct.setUnscaledIsSet(true);
+    }
+  }
+
+}
+
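
The struct pairs a scale with raw unscaled bytes. Assuming those bytes follow BigInteger.toByteArray()'s big-endian two's-complement form (an assumption; the generated class itself does not constrain the encoding), it maps directly onto java.math.BigDecimal. A small illustrative bridge; the helper names are ours, not part of the generated API:

    import java.math.BigDecimal;
    import java.math.BigInteger;
    import java.nio.ByteBuffer;
    import org.apache.hadoop.hive.metastore.api.Decimal;

    public class DecimalBridge {
      // Illustrative helpers; the generated class itself only stores the two fields.
      static Decimal toThrift(BigDecimal value) {
        // BigInteger.toByteArray() produces big-endian two's-complement bytes,
        // matching the binary 'unscaled' field under the stated assumption.
        return new Decimal((short) value.scale(),
            ByteBuffer.wrap(value.unscaledValue().toByteArray()));
      }

      static BigDecimal fromThrift(Decimal d) {
        return new BigDecimal(new BigInteger(d.getUnscaled()), d.getScale());
      }

      public static void main(String[] args) {
        BigDecimal in = new BigDecimal("123.450");
        System.out.println(fromThrift(toThrift(in)));  // prints 123.450
      }
    }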

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
new file mode 100644
index 0000000..33c4e53
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DecimalColumnStatsData.java
@@ -0,0 +1,823 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class DecimalColumnStatsData implements org.apache.thrift.TBase<DecimalColumnStatsData, DecimalColumnStatsData._Fields>, java.io.Serializable, Cloneable, Comparable<DecimalColumnStatsData> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("DecimalColumnStatsData");
+
+  private static final org.apache.thrift.protocol.TField LOW_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("lowValue", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+  private static final org.apache.thrift.protocol.TField HIGH_VALUE_FIELD_DESC = new org.apache.thrift.protocol.TField("highValue", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+  private static final org.apache.thrift.protocol.TField NUM_NULLS_FIELD_DESC = new org.apache.thrift.protocol.TField("numNulls", org.apache.thrift.protocol.TType.I64, (short)3);
+  private static final org.apache.thrift.protocol.TField NUM_DVS_FIELD_DESC = new org.apache.thrift.protocol.TField("numDVs", org.apache.thrift.protocol.TType.I64, (short)4);
+  private static final org.apache.thrift.protocol.TField BIT_VECTORS_FIELD_DESC = new org.apache.thrift.protocol.TField("bitVectors", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new DecimalColumnStatsDataStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new DecimalColumnStatsDataTupleSchemeFactory());
+  }
+
+  private Decimal lowValue; // optional
+  private Decimal highValue; // optional
+  private long numNulls; // required
+  private long numDVs; // required
+  private ByteBuffer bitVectors; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    LOW_VALUE((short)1, "lowValue"),
+    HIGH_VALUE((short)2, "highValue"),
+    NUM_NULLS((short)3, "numNulls"),
+    NUM_DVS((short)4, "numDVs"),
+    BIT_VECTORS((short)5, "bitVectors");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // LOW_VALUE
+          return LOW_VALUE;
+        case 2: // HIGH_VALUE
+          return HIGH_VALUE;
+        case 3: // NUM_NULLS
+          return NUM_NULLS;
+        case 4: // NUM_DVS
+          return NUM_DVS;
+        case 5: // BIT_VECTORS
+          return BIT_VECTORS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __NUMNULLS_ISSET_ID = 0;
+  private static final int __NUMDVS_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.LOW_VALUE,_Fields.HIGH_VALUE,_Fields.BIT_VECTORS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.LOW_VALUE, new org.apache.thrift.meta_data.FieldMetaData("lowValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Decimal.class)));
+    tmpMap.put(_Fields.HIGH_VALUE, new org.apache.thrift.meta_data.FieldMetaData("highValue", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Decimal.class)));
+    tmpMap.put(_Fields.NUM_NULLS, new org.apache.thrift.meta_data.FieldMetaData("numNulls", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.NUM_DVS, new org.apache.thrift.meta_data.FieldMetaData("numDVs", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.BIT_VECTORS, new org.apache.thrift.meta_data.FieldMetaData("bitVectors", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(DecimalColumnStatsData.class, metaDataMap);
+  }
+
+  public DecimalColumnStatsData() {
+  }
+
+  public DecimalColumnStatsData(
+    long numNulls,
+    long numDVs)
+  {
+    this();
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+    this.numDVs = numDVs;
+    setNumDVsIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public DecimalColumnStatsData(DecimalColumnStatsData other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetLowValue()) {
+      this.lowValue = new Decimal(other.lowValue);
+    }
+    if (other.isSetHighValue()) {
+      this.highValue = new Decimal(other.highValue);
+    }
+    this.numNulls = other.numNulls;
+    this.numDVs = other.numDVs;
+    if (other.isSetBitVectors()) {
+      this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(other.bitVectors);
+    }
+  }
+
+  public DecimalColumnStatsData deepCopy() {
+    return new DecimalColumnStatsData(this);
+  }
+
+  @Override
+  public void clear() {
+    this.lowValue = null;
+    this.highValue = null;
+    setNumNullsIsSet(false);
+    this.numNulls = 0;
+    setNumDVsIsSet(false);
+    this.numDVs = 0;
+    this.bitVectors = null;
+  }
+
+  public Decimal getLowValue() {
+    return this.lowValue;
+  }
+
+  public void setLowValue(Decimal lowValue) {
+    this.lowValue = lowValue;
+  }
+
+  public void unsetLowValue() {
+    this.lowValue = null;
+  }
+
+  /** Returns true if field lowValue is set (has been assigned a value) and false otherwise */
+  public boolean isSetLowValue() {
+    return this.lowValue != null;
+  }
+
+  public void setLowValueIsSet(boolean value) {
+    if (!value) {
+      this.lowValue = null;
+    }
+  }
+
+  public Decimal getHighValue() {
+    return this.highValue;
+  }
+
+  public void setHighValue(Decimal highValue) {
+    this.highValue = highValue;
+  }
+
+  public void unsetHighValue() {
+    this.highValue = null;
+  }
+
+  /** Returns true if field highValue is set (has been assigned a value) and false otherwise */
+  public boolean isSetHighValue() {
+    return this.highValue != null;
+  }
+
+  public void setHighValueIsSet(boolean value) {
+    if (!value) {
+      this.highValue = null;
+    }
+  }
+
+  public long getNumNulls() {
+    return this.numNulls;
+  }
+
+  public void setNumNulls(long numNulls) {
+    this.numNulls = numNulls;
+    setNumNullsIsSet(true);
+  }
+
+  public void unsetNumNulls() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  /** Returns true if field numNulls is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumNulls() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMNULLS_ISSET_ID);
+  }
+
+  public void setNumNullsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMNULLS_ISSET_ID, value);
+  }
+
+  public long getNumDVs() {
+    return this.numDVs;
+  }
+
+  public void setNumDVs(long numDVs) {
+    this.numDVs = numDVs;
+    setNumDVsIsSet(true);
+  }
+
+  public void unsetNumDVs() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMDVS_ISSET_ID);
+  }
+
+  /** Returns true if field numDVs is set (has been assigned a value) and false otherwise */
+  public boolean isSetNumDVs() {
+    return EncodingUtils.testBit(__isset_bitfield, __NUMDVS_ISSET_ID);
+  }
+
+  public void setNumDVsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMDVS_ISSET_ID, value);
+  }
+
+  public byte[] getBitVectors() {
+    setBitVectors(org.apache.thrift.TBaseHelper.rightSize(bitVectors));
+    return bitVectors == null ? null : bitVectors.array();
+  }
+
+  public ByteBuffer bufferForBitVectors() {
+    return org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void setBitVectors(byte[] bitVectors) {
+    this.bitVectors = bitVectors == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(bitVectors, bitVectors.length));
+  }
+
+  public void setBitVectors(ByteBuffer bitVectors) {
+    this.bitVectors = org.apache.thrift.TBaseHelper.copyBinary(bitVectors);
+  }
+
+  public void unsetBitVectors() {
+    this.bitVectors = null;
+  }
+
+  /** Returns true if field bitVectors is set (has been assigned a value) and false otherwise */
+  public boolean isSetBitVectors() {
+    return this.bitVectors != null;
+  }
+
+  public void setBitVectorsIsSet(boolean value) {
+    if (!value) {
+      this.bitVectors = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case LOW_VALUE:
+      if (value == null) {
+        unsetLowValue();
+      } else {
+        setLowValue((Decimal)value);
+      }
+      break;
+
+    case HIGH_VALUE:
+      if (value == null) {
+        unsetHighValue();
+      } else {
+        setHighValue((Decimal)value);
+      }
+      break;
+
+    case NUM_NULLS:
+      if (value == null) {
+        unsetNumNulls();
+      } else {
+        setNumNulls((Long)value);
+      }
+      break;
+
+    case NUM_DVS:
+      if (value == null) {
+        unsetNumDVs();
+      } else {
+        setNumDVs((Long)value);
+      }
+      break;
+
+    case BIT_VECTORS:
+      if (value == null) {
+        unsetBitVectors();
+      } else {
+        setBitVectors((ByteBuffer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case LOW_VALUE:
+      return getLowValue();
+
+    case HIGH_VALUE:
+      return getHighValue();
+
+    case NUM_NULLS:
+      return getNumNulls();
+
+    case NUM_DVS:
+      return getNumDVs();
+
+    case BIT_VECTORS:
+      return getBitVectors();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case LOW_VALUE:
+      return isSetLowValue();
+    case HIGH_VALUE:
+      return isSetHighValue();
+    case NUM_NULLS:
+      return isSetNumNulls();
+    case NUM_DVS:
+      return isSetNumDVs();
+    case BIT_VECTORS:
+      return isSetBitVectors();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof DecimalColumnStatsData)
+      return this.equals((DecimalColumnStatsData)that);
+    return false;
+  }
+
+  public boolean equals(DecimalColumnStatsData that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_lowValue = true && this.isSetLowValue();
+    boolean that_present_lowValue = true && that.isSetLowValue();
+    if (this_present_lowValue || that_present_lowValue) {
+      if (!(this_present_lowValue && that_present_lowValue))
+        return false;
+      if (!this.lowValue.equals(that.lowValue))
+        return false;
+    }
+
+    boolean this_present_highValue = true && this.isSetHighValue();
+    boolean that_present_highValue = true && that.isSetHighValue();
+    if (this_present_highValue || that_present_highValue) {
+      if (!(this_present_highValue && that_present_highValue))
+        return false;
+      if (!this.highValue.equals(that.highValue))
+        return false;
+    }
+
+    boolean this_present_numNulls = true;
+    boolean that_present_numNulls = true;
+    if (this_present_numNulls || that_present_numNulls) {
+      if (!(this_present_numNulls && that_present_numNulls))
+        return false;
+      if (this.numNulls != that.numNulls)
+        return false;
+    }
+
+    boolean this_present_numDVs = true;
+    boolean that_present_numDVs = true;
+    if (this_present_numDVs || that_present_numDVs) {
+      if (!(this_present_numDVs && that_present_numDVs))
+        return false;
+      if (this.numDVs != that.numDVs)
+        return false;
+    }
+
+    boolean this_present_bitVectors = true && this.isSetBitVectors();
+    boolean that_present_bitVectors = true && that.isSetBitVectors();
+    if (this_present_bitVectors || that_present_bitVectors) {
+      if (!(this_present_bitVectors && that_present_bitVectors))
+        return false;
+      if (!this.bitVectors.equals(that.bitVectors))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_lowValue = true && (isSetLowValue());
+    list.add(present_lowValue);
+    if (present_lowValue)
+      list.add(lowValue);
+
+    boolean present_highValue = true && (isSetHighValue());
+    list.add(present_highValue);
+    if (present_highValue)
+      list.add(highValue);
+
+    boolean present_numNulls = true;
+    list.add(present_numNulls);
+    if (present_numNulls)
+      list.add(numNulls);
+
+    boolean present_numDVs = true;
+    list.add(present_numDVs);
+    if (present_numDVs)
+      list.add(numDVs);
+
+    boolean present_bitVectors = true && (isSetBitVectors());
+    list.add(present_bitVectors);
+    if (present_bitVectors)
+      list.add(bitVectors);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(DecimalColumnStatsData other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetLowValue()).compareTo(other.isSetLowValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLowValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lowValue, other.lowValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetHighValue()).compareTo(other.isSetHighValue());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHighValue()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.highValue, other.highValue);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumNulls()).compareTo(other.isSetNumNulls());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumNulls()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numNulls, other.numNulls);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNumDVs()).compareTo(other.isSetNumDVs());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNumDVs()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numDVs, other.numDVs);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetBitVectors()).compareTo(other.isSetBitVectors());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetBitVectors()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.bitVectors, other.bitVectors);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("DecimalColumnStatsData(");
+    boolean first = true;
+
+    if (isSetLowValue()) {
+      sb.append("lowValue:");
+      if (this.lowValue == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.lowValue);
+      }
+      first = false;
+    }
+    if (isSetHighValue()) {
+      if (!first) sb.append(", ");
+      sb.append("highValue:");
+      if (this.highValue == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.highValue);
+      }
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("numNulls:");
+    sb.append(this.numNulls);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("numDVs:");
+    sb.append(this.numDVs);
+    first = false;
+    if (isSetBitVectors()) {
+      if (!first) sb.append(", ");
+      sb.append("bitVectors:");
+      if (this.bitVectors == null) {
+        sb.append("null");
+      } else {
+        org.apache.thrift.TBaseHelper.toString(this.bitVectors, sb);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetNumNulls()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numNulls' is unset! Struct:" + toString());
+    }
+
+    if (!isSetNumDVs()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'numDVs' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+    if (lowValue != null) {
+      lowValue.validate();
+    }
+    if (highValue != null) {
+      highValue.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class DecimalColumnStatsDataStandardSchemeFactory implements SchemeFactory {
+    public DecimalColumnStatsDataStandardScheme getScheme() {
+      return new DecimalColumnStatsDataStandardScheme();
+    }
+  }
+
+  private static class DecimalColumnStatsDataStandardScheme extends StandardScheme<DecimalColumnStatsData> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, DecimalColumnStatsData struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // LOW_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.lowValue = new Decimal();
+              struct.lowValue.read(iprot);
+              struct.setLowValueIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // HIGH_VALUE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.highValue = new Decimal();
+              struct.highValue.read(iprot);
+              struct.setHighValueIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // NUM_NULLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numNulls = iprot.readI64();
+              struct.setNumNullsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // NUM_DVS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.numDVs = iprot.readI64();
+              struct.setNumDVsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // BIT_VECTORS
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.bitVectors = iprot.readBinary();
+              struct.setBitVectorsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, DecimalColumnStatsData struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.lowValue != null) {
+        if (struct.isSetLowValue()) {
+          oprot.writeFieldBegin(LOW_VALUE_FIELD_DESC);
+          struct.lowValue.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.highValue != null) {
+        if (struct.isSetHighValue()) {
+          oprot.writeFieldBegin(HIGH_VALUE_FIELD_DESC);
+          struct.highValue.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldBegin(NUM_NULLS_FIELD_DESC);
+      oprot.writeI64(struct.numNulls);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(NUM_DVS_FIELD_DESC);
+      oprot.writeI64(struct.numDVs);
+      oprot.writeFieldEnd();
+      if (struct.bitVectors != null) {
+        if (struct.isSetBitVectors()) {
+          oprot.writeFieldBegin(BIT_VECTORS_FIELD_DESC);
+          oprot.writeBinary(struct.bitVectors);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class DecimalColumnStatsDataTupleSchemeFactory implements SchemeFactory {
+    public DecimalColumnStatsDataTupleScheme getScheme() {
+      return new DecimalColumnStatsDataTupleScheme();
+    }
+  }
+
+  private static class DecimalColumnStatsDataTupleScheme extends TupleScheme<DecimalColumnStatsData> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, DecimalColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.numNulls);
+      oprot.writeI64(struct.numDVs);
+      BitSet optionals = new BitSet();
+      if (struct.isSetLowValue()) {
+        optionals.set(0);
+      }
+      if (struct.isSetHighValue()) {
+        optionals.set(1);
+      }
+      if (struct.isSetBitVectors()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetLowValue()) {
+        struct.lowValue.write(oprot);
+      }
+      if (struct.isSetHighValue()) {
+        struct.highValue.write(oprot);
+      }
+      if (struct.isSetBitVectors()) {
+        oprot.writeBinary(struct.bitVectors);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, DecimalColumnStatsData struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.numNulls = iprot.readI64();
+      struct.setNumNullsIsSet(true);
+      struct.numDVs = iprot.readI64();
+      struct.setNumDVsIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.lowValue = new Decimal();
+        struct.lowValue.read(iprot);
+        struct.setLowValueIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.highValue = new Decimal();
+        struct.highValue.read(iprot);
+        struct.setHighValueIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.bitVectors = iprot.readBinary();
+        struct.setBitVectorsIsSet(true);
+      }
+    }
+  }
+
+}
+
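
For orientation before the next commit in the series: a minimal sketch of populating the generated DecimalColumnStatsData above and round-tripping it through TCompactProtocol, the same protocol its writeObject/readObject delegate to. The byte values and the wrapper class name are illustrative only.

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hive.metastore.api.Decimal;
    import org.apache.hadoop.hive.metastore.api.DecimalColumnStatsData;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class DecimalStatsRoundTrip {
      public static void main(String[] args) throws Exception {
        // required fields go through the generated two-arg constructor
        DecimalColumnStatsData stats = new DecimalColumnStatsData(4L /* numNulls */, 42L /* numDVs */);

        // optional lowValue: unscaled bytes plus a scale, e.g. 0x01 with scale 2 ~ 0.01
        Decimal low = new Decimal();
        low.setScale((short) 2);
        low.setUnscaled(ByteBuffer.wrap(new byte[] {0x01}));
        stats.setLowValue(low);

        byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(stats);

        DecimalColumnStatsData copy = new DecimalColumnStatsData();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);
        System.out.println(copy.equals(stats));  // true: equals() only compares fields that are set
      }
    }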


[58/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
index 0000000,1ca6454..1b4f01a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
+++ b/standalone-metastore/metastore-common/src/main/thrift/hive_metastore.thrift
@@@ -1,0 -1,2275 +1,2318 @@@
+ #!/usr/local/bin/thrift -java
+ 
+ /**
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ #
+ # Thrift Service that the MetaStore is built on
+ #
+ 
+ include "share/fb303/if/fb303.thrift"
+ 
+ namespace java org.apache.hadoop.hive.metastore.api
+ namespace php metastore
+ namespace cpp Apache.Hadoop.Hive
+ 
+ const string DDL_TIME = "transient_lastDdlTime"
+ 
+ struct Version {
+   1: string version,
+   2: string comments
+ }
+ 
+ struct FieldSchema {
+   1: string name, // name of the field
+   2: string type, // type of the field. primitive types defined above, specify list<TYPE_NAME>, map<TYPE_NAME, TYPE_NAME> for lists & maps
+   3: string comment
+ }
+ 
+ struct SQLPrimaryKey {
+   1: string table_db,    // table schema
+   2: string table_name,  // table name
+   3: string column_name, // column name
+   4: i32 key_seq,        // sequence number within primary key
+   5: string pk_name,     // primary key name
+   6: bool enable_cstr,   // Enable/Disable
+   7: bool validate_cstr, // Validate/No validate
+   8: bool rely_cstr,     // Rely/No Rely
+   9: optional string catName
+ }
+ 
+ struct SQLForeignKey {
+   1: string pktable_db,    // primary key table schema
+   2: string pktable_name,  // primary key table name
+   3: string pkcolumn_name, // primary key column name
+   4: string fktable_db,    // foreign key table schema
+   5: string fktable_name,  // foreign key table name
+   6: string fkcolumn_name, // foreign key column name
+   7: i32 key_seq,          // sequence within foreign key
+   8: i32 update_rule,      // what happens to foreign key when parent key is updated
+   9: i32 delete_rule,      // what happens to foreign key when parent key is deleted
+   10: string fk_name,      // foreign key name
+   11: string pk_name,      // primary key name
+   12: bool enable_cstr,    // Enable/Disable
+   13: bool validate_cstr,  // Validate/No validate
+   14: bool rely_cstr,      // Rely/No Rely
+   15: optional string catName
+ }
+ 
+ struct SQLUniqueConstraint {
+   1: string catName,     // table catalog
+   2: string table_db,    // table schema
+   3: string table_name,  // table name
+   4: string column_name, // column name
+   5: i32 key_seq,        // sequence number within unique constraint
+   6: string uk_name,     // unique key name
+   7: bool enable_cstr,   // Enable/Disable
+   8: bool validate_cstr, // Validate/No validate
+   9: bool rely_cstr,     // Rely/No Rely
+ }
+ 
+ struct SQLNotNullConstraint {
+   1: string catName,     // table catalog
+   2: string table_db,    // table schema
+   3: string table_name,  // table name
+   4: string column_name, // column name
+   5: string nn_name,     // not null name
+   6: bool enable_cstr,   // Enable/Disable
+   7: bool validate_cstr, // Validate/No validate
+   8: bool rely_cstr,     // Rely/No Rely
+ }
+ 
+ struct SQLDefaultConstraint {
+   1: string catName,     // catalog name
+   2: string table_db,    // table schema
+   3: string table_name,  // table name
+   4: string column_name, // column name
+   5: string default_value,// default value
+   6: string dc_name,     // default name
+   7: bool enable_cstr,   // Enable/Disable
+   8: bool validate_cstr, // Validate/No validate
+   9: bool rely_cstr      // Rely/No Rely
+ }
+ 
+ struct SQLCheckConstraint {
+   1: string catName,     // catalog name
+   2: string table_db,    // table schema
+   3: string table_name,  // table name
+   4: string column_name, // column name
+   5: string check_expression,// check expression
+   6: string dc_name,     // default name
+   7: bool enable_cstr,   // Enable/Disable
+   8: bool validate_cstr, // Validate/No validate
+   9: bool rely_cstr      // Rely/No Rely
+ }
+ 
+ struct Type {
+   1: string          name,             // one of the types in PrimitiveTypes or CollectionTypes or User defined types
+   2: optional string type1,            // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE)
+   3: optional string type2,            // val type if the name is 'map' (MAP_TYPE)
+   4: optional list<FieldSchema> fields // if the name is one of the user defined types
+ }
+ 
+ enum HiveObjectType {
+   GLOBAL = 1,
+   DATABASE = 2,
+   TABLE = 3,
+   PARTITION = 4,
+   COLUMN = 5,
+ }
+ 
+ enum PrincipalType {
+   USER = 1,
+   ROLE = 2,
+   GROUP = 3,
+ }
+ 
+ const string HIVE_FILTER_FIELD_OWNER = "hive_filter_field_owner__"
+ const string HIVE_FILTER_FIELD_PARAMS = "hive_filter_field_params__"
+ const string HIVE_FILTER_FIELD_LAST_ACCESS = "hive_filter_field_last_access__"
+ 
+ enum PartitionEventType {
+   LOAD_DONE = 1,
+ }
+ 
+ // Enums for transaction and lock management 
+ enum TxnState {
+     COMMITTED = 1,
+     ABORTED = 2,
+     OPEN = 3,
+ }
+ 
+ enum LockLevel {
+     DB = 1,
+     TABLE = 2,
+     PARTITION = 3,
+ }
+ 
+ enum LockState {
+     ACQUIRED = 1,       // requester has the lock
+     WAITING = 2,        // requester is waiting for the lock and should call check_lock at a later point to see if the lock has been obtained (see the polling sketch after the lock structs below).
+     ABORT = 3,          // the lock has been aborted, most likely due to timeout
+     NOT_ACQUIRED = 4,   // returned only with lockNoWait, indicates the lock was not available and was not acquired
+ }
+ 
+ enum LockType {
+     SHARED_READ = 1,
+     SHARED_WRITE = 2,
+     EXCLUSIVE = 3,
+ }
+ 
+ enum CompactionType {
+     MINOR = 1,
+     MAJOR = 2,
+ }
+ 
+ enum GrantRevokeType {
+     GRANT = 1,
+     REVOKE = 2,
+ }
+ 
+ enum DataOperationType {
+     SELECT = 1,
+     INSERT = 2,
+     UPDATE = 3,
+     DELETE = 4,
+     UNSET = 5,  // the default; distinguishes an unset value from the NULL sent by old clients
+     NO_TXN = 6, // drop table, insert overwrite, etc. - something non-transactional
+ }
+ 
+ // Types of events the client can request that the metastore fire.  For now just support DML operations, as the metastore knows
+ // about DDL operations and there's no reason for the client to request such an event.
+ enum EventRequestType {
+     INSERT = 1,
+     UPDATE = 2,
+     DELETE = 3,
+ }
+ 
+ enum SerdeType {
+   HIVE = 1,
+   SCHEMA_REGISTRY = 2,
+ }
+ 
+ enum SchemaType {
+   HIVE = 1,
+   AVRO = 2,
+ }
+ 
+ enum SchemaCompatibility {
+   NONE = 1,
+   BACKWARD = 2,
+   FORWARD = 3,
+   BOTH = 4
+ }
+ 
+ enum SchemaValidation {
+   LATEST = 1,
+   ALL = 2
+ }
+ 
+ enum SchemaVersionState {
+   INITIATED = 1,
+   START_REVIEW = 2,
+   CHANGES_REQUIRED = 3,
+   REVIEWED = 4,
+   ENABLED = 5,
+   DISABLED = 6,
+   ARCHIVED = 7,
+   DELETED = 8
+ }
+ 
+ struct HiveObjectRef{
+   1: HiveObjectType objectType,
+   2: string dbName,
+   3: string objectName,
+   4: list<string> partValues,
+   5: string columnName,
+   6: optional string catName
+ }
+ 
+ struct PrivilegeGrantInfo {
+   1: string privilege,
+   2: i32 createTime,
+   3: string grantor,
+   4: PrincipalType grantorType,
+   5: bool grantOption,
+ }
+ 
+ struct HiveObjectPrivilege {
+   1: HiveObjectRef  hiveObject,
+   2: string principalName,
+   3: PrincipalType principalType,
+   4: PrivilegeGrantInfo grantInfo,
+   5: string authorizer,
+ }
+ 
+ struct PrivilegeBag {
+   1: list<HiveObjectPrivilege> privileges,
+ }
+ 
+ struct PrincipalPrivilegeSet {
+   1: map<string, list<PrivilegeGrantInfo>> userPrivileges, // user name -> privilege grant info
+   2: map<string, list<PrivilegeGrantInfo>> groupPrivileges, // group name -> privilege grant info
+   3: map<string, list<PrivilegeGrantInfo>> rolePrivileges, //role name -> privilege grant info
+ }
+ 
+ struct GrantRevokePrivilegeRequest {
+   1: GrantRevokeType requestType;
+   2: PrivilegeBag privileges;
+   3: optional bool revokeGrantOption;  // Only for revoke request
+ }
+ 
+ struct GrantRevokePrivilegeResponse {
+   1: optional bool success;
+ }
+ 
+ struct Role {
+   1: string roleName,
+   2: i32 createTime,
+   3: string ownerName,
+ }
+ 
+ // Representation of a grant for a principal to a role
+ struct RolePrincipalGrant {
+   1: string roleName,
+   2: string principalName,
+   3: PrincipalType principalType,
+   4: bool grantOption,
+   5: i32 grantTime,
+   6: string grantorName,
+   7: PrincipalType grantorPrincipalType
+ }
+ 
+ struct GetRoleGrantsForPrincipalRequest {
+   1: required string principal_name,
+   2: required PrincipalType principal_type
+ }
+ 
+ struct GetRoleGrantsForPrincipalResponse {
+   1: required list<RolePrincipalGrant> principalGrants;
+ }
+ 
+ struct GetPrincipalsInRoleRequest {
+   1: required string roleName;
+ }
+ 
+ struct GetPrincipalsInRoleResponse {
+   1: required list<RolePrincipalGrant> principalGrants;
+ }
+ 
+ struct GrantRevokeRoleRequest {
+   1: GrantRevokeType requestType;
+   2: string roleName;
+   3: string principalName;
+   4: PrincipalType principalType;
+   5: optional string grantor;            // Needed for grant
+   6: optional PrincipalType grantorType; // Needed for grant
+   7: optional bool grantOption;
+ }
+ 
+ struct GrantRevokeRoleResponse {
+   1: optional bool success;
+ }
+ 
+ struct Catalog {
+   1: string name,                    // Name of the catalog
+   2: optional string description,    // description of the catalog
+   3: string locationUri              // default storage location.  When databases are created in
+                                      // this catalog, if they do not specify a location, they will
+                                      // be placed in this location.
+ }
+ 
+ struct CreateCatalogRequest {
+   1: Catalog catalog
+ }
+ 
+ struct AlterCatalogRequest {
+   1: string name,
+   2: Catalog newCat
+ }
+ 
+ struct GetCatalogRequest {
+   1: string name
+ }
+ 
+ struct GetCatalogResponse {
+   1: Catalog catalog
+ }
+ 
+ struct GetCatalogsResponse {
+   1: list<string> names
+ }
+ 
+ struct DropCatalogRequest {
+   1: string name
+ }
+ 
+ // namespace for tables
+ struct Database {
+   1: string name,
+   2: string description,
+   3: string locationUri,
+   4: map<string, string> parameters, // properties associated with the database
+   5: optional PrincipalPrivilegeSet privileges,
+   6: optional string ownerName,
+   7: optional PrincipalType ownerType,
+   8: optional string catalogName
+ }
+ 
+ // This object holds the information needed by SerDes
+ struct SerDeInfo {
+   1: string name,                   // name of the serde, table name by default
+   2: string serializationLib,       // usually the class that implements the extractor & loader
+   3: map<string, string> parameters, // initialization parameters
+   4: optional string description,
+   5: optional string serializerClass,
+   6: optional string deserializerClass,
+   7: optional SerdeType serdeType
+ }
+ 
+ // sort order of a column (column name along with asc(1)/desc(0))
+ struct Order {
+   1: string col,      // sort column name
+   2: i32    order     // asc(1) or desc(0)
+ }
+ 
+ // this object holds all the information about skewed table
+ struct SkewedInfo {
+   1: list<string> skewedColNames, // skewed column names
+   2: list<list<string>> skewedColValues, //skewed values
+   3: map<list<string>, string> skewedColValueLocationMaps, //skewed value to location mappings
+ }
+ 
+ // this object holds all the information about physical storage of the data belonging to a table
+ struct StorageDescriptor {
+   1: list<FieldSchema> cols,  // required (refer to types defined above)
+   2: string location,         // defaults to <warehouse loc>/<db loc>/tablename
+   3: string inputFormat,      // SequenceFileInputFormat (binary) or TextInputFormat or custom format
+   4: string outputFormat,     // SequenceFileOutputFormat (binary) or IgnoreKeyTextOutputFormat or custom format
+   5: bool   compressed,       // compressed or not
+   6: i32    numBuckets,       // this must be specified if there are any dimension columns
+   7: SerDeInfo    serdeInfo,  // serialization and deserialization information
+   8: list<string> bucketCols, // reducer grouping columns and clustering columns and bucketing columns
+   9: list<Order>  sortCols,   // sort order of the data in each bucket
+   10: map<string, string> parameters, // any user supplied key value hash
+   11: optional SkewedInfo skewedInfo, // skewed information
+   12: optional bool   storedAsSubDirectories       // stored as subdirectories or not
+ }
+ 
+ // table information
+ struct Table {
+   1: string tableName,                // name of the table
+   2: string dbName,                   // database name ('default')
+   3: string owner,                    // owner of this table
+   4: i32    createTime,               // creation time of the table
+   5: i32    lastAccessTime,           // last access time (usually this will be filled from HDFS and shouldn't be relied on)
+   6: i32    retention,                // retention time
+   7: StorageDescriptor sd,            // storage descriptor of the table
+   8: list<FieldSchema> partitionKeys, // partition keys of the table. only primitive types are supported
+   9: map<string, string> parameters,   // to store comments or any other user level parameters
+   10: string viewOriginalText,         // original view text, null for non-view
+   11: string viewExpandedText,         // expanded view text, null for non-view
+   12: string tableType,                // table type enum, e.g. EXTERNAL_TABLE
+   13: optional PrincipalPrivilegeSet privileges,
+   14: optional bool temporary=false,
+   15: optional bool rewriteEnabled,     // rewrite enabled or not
+   16: optional CreationMetadata creationMetadata,   // only for MVs, it stores table names used and txn list at MV creation
+   17: optional string catName,          // Name of the catalog the table is in
 -  18: optional PrincipalType ownerType = PrincipalType.USER // owner type of this table (default to USER for backward compatibility)
++  18: optional PrincipalType ownerType = PrincipalType.USER, // owner type of this table (default to USER for backward compatibility)
++  19: optional i64 writeId=-1,
++  20: optional bool isStatsCompliant
+ }
+ 
+ struct Partition {
+   1: list<string> values // string value is converted to appropriate partition key type
+   2: string       dbName,
+   3: string       tableName,
+   4: i32          createTime,
+   5: i32          lastAccessTime,
+   6: StorageDescriptor   sd,
+   7: map<string, string> parameters,
+   8: optional PrincipalPrivilegeSet privileges,
 -  9: optional string catName
++  9: optional string catName,
++  10: optional i64 writeId=-1,
++  11: optional bool isStatsCompliant
+ }
+ 
+ struct PartitionWithoutSD {
+   1: list<string> values // string value is converted to appropriate partition key type
+   2: i32          createTime,
+   3: i32          lastAccessTime,
+   4: string       relativePath,
+   5: map<string, string> parameters,
+   6: optional PrincipalPrivilegeSet privileges
+ }
+ 
+ struct PartitionSpecWithSharedSD {
+   1: list<PartitionWithoutSD> partitions,
+   2: StorageDescriptor sd,
+ }
+ 
+ struct PartitionListComposingSpec {
+   1: list<Partition> partitions
+ }
+ 
+ struct PartitionSpec {
+   1: string dbName,
+   2: string tableName,
+   3: string rootPath,
+   4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec,
+   5: optional PartitionListComposingSpec partitionList,
 -  6: optional string catName
++  6: optional string catName,
++  7: optional i64 writeId=-1,
++  8: optional bool isStatsCompliant
+ }
+ 
+ // column statistics
+ struct BooleanColumnStatsData {
+ 1: required i64 numTrues,
+ 2: required i64 numFalses,
+ 3: required i64 numNulls,
+ 4: optional binary bitVectors
+ }
+ 
+ struct DoubleColumnStatsData {
+ 1: optional double lowValue,
+ 2: optional double highValue,
+ 3: required i64 numNulls,
+ 4: required i64 numDVs,
+ 5: optional binary bitVectors
+ }
+ 
+ struct LongColumnStatsData {
+ 1: optional i64 lowValue,
+ 2: optional i64 highValue,
+ 3: required i64 numNulls,
+ 4: required i64 numDVs,
+ 5: optional binary bitVectors
+ }
+ 
+ struct StringColumnStatsData {
+ 1: required i64 maxColLen,
+ 2: required double avgColLen,
+ 3: required i64 numNulls,
+ 4: required i64 numDVs,
+ 5: optional binary bitVectors
+ }
+ 
+ struct BinaryColumnStatsData {
+ 1: required i64 maxColLen,
+ 2: required double avgColLen,
+ 3: required i64 numNulls,
+ 4: optional binary bitVectors
+ }
+ 
+ 
+ struct Decimal {
+ 3: required i16 scale, // force using scale first in Decimal.compareTo
+ 1: required binary unscaled
+ }
+ 
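
A note on the field-order trick above: the Thrift compiler emits compareTo following declaration order rather than field-id order, so listing scale first makes the generated Decimal compare scales before unscaled bytes. A small sketch, using the setters the standard codegen produces:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hive.metastore.api.Decimal;

    class DecimalOrdering {
      static int demo() {
        Decimal a = new Decimal();
        a.setScale((short) 0);
        a.setUnscaled(ByteBuffer.wrap(new byte[] {0x7F}));  // large unscaled bytes

        Decimal b = new Decimal();
        b.setScale((short) 2);
        b.setUnscaled(ByteBuffer.wrap(new byte[] {0x01}));  // small unscaled bytes

        // scale (declared first) is compared before unscaled, so this is
        // negative (0 < 2) even though a's unscaled bytes are larger
        return a.compareTo(b);
      }
    }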
+ struct DecimalColumnStatsData {
+ 1: optional Decimal lowValue,
+ 2: optional Decimal highValue,
+ 3: required i64 numNulls,
+ 4: required i64 numDVs,
+ 5: optional binary bitVectors
+ }
+ 
+ struct Date {
+ 1: required i64 daysSinceEpoch
+ }
+ 
+ struct DateColumnStatsData {
+ 1: optional Date lowValue,
+ 2: optional Date highValue,
+ 3: required i64 numNulls,
+ 4: required i64 numDVs,
+ 5: optional binary bitVectors
+ }
+ 
+ union ColumnStatisticsData {
+ 1: BooleanColumnStatsData booleanStats,
+ 2: LongColumnStatsData longStats,
+ 3: DoubleColumnStatsData doubleStats,
+ 4: StringColumnStatsData stringStats,
+ 5: BinaryColumnStatsData binaryStats,
+ 6: DecimalColumnStatsData decimalStats,
+ 7: DateColumnStatsData dateStats
+ }
+ 
+ struct ColumnStatisticsObj {
+ 1: required string colName,
+ 2: required string colType,
+ 3: required ColumnStatisticsData statsData
+ }
+ 
+ struct ColumnStatisticsDesc {
+ 1: required bool isTblLevel,
+ 2: required string dbName,
+ 3: required string tableName,
+ 4: optional string partName,
+ 5: optional i64 lastAnalyzed,
+ 6: optional string catName
+ }
+ 
+ struct ColumnStatistics {
+ 1: required ColumnStatisticsDesc statsDesc,
 -2: required list<ColumnStatisticsObj> statsObj;
++2: required list<ColumnStatisticsObj> statsObj,
++3: optional i64 txnId=-1,            // transaction id of the query that sends this structure TODO## needed?
++4: optional string validWriteIdList, // valid write id list for the table for which this struct is being sent
++5: optional bool isStatsCompliant // Are the stats isolation-level-compliant with
++                                  // the calling query?
+ }
+ 
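
ColumnStatisticsData is generated as a Thrift union, so exactly one branch may be set at a time. A hedged sketch of assembling decimal column statistics end to end; the static factory method is assumed from standard Thrift union codegen, and the db/table/column names are placeholders:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.*;

    class BuildDecimalColumnStats {
      static ColumnStatistics build() {
        DecimalColumnStatsData decimalStats = new DecimalColumnStatsData(0L, 1500L);

        // unions get one static factory per branch; this sets only decimalStats
        ColumnStatisticsData data = ColumnStatisticsData.decimalStats(decimalStats);

        ColumnStatisticsObj obj = new ColumnStatisticsObj("price", "decimal(10,2)", data);
        ColumnStatisticsDesc desc = new ColumnStatisticsDesc(true /* isTblLevel */, "default", "orders");
        return new ColumnStatistics(desc, Arrays.asList(obj));
      }
    }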
+ struct AggrStats {
+ 1: required list<ColumnStatisticsObj> colStats,
 -2: required i64 partsFound // number of partitions for which stats were found
++2: required i64 partsFound, // number of partitions for which stats were found
++3: optional bool isStatsCompliant
+ }
+ 
+ struct SetPartitionsStatsRequest {
+ 1: required list<ColumnStatistics> colStats,
 -2: optional bool needMerge //stats need to be merged with the existing stats
++2: optional bool needMerge, //stats need to be merged with the existing stats
++3: optional i64 txnId=-1,   // transaction id of the query that sends this structure
++4: optional i64 writeId=-1,         // writeId for the current query that updates the stats
++5: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
+ }
+ 
+ // schema of the table/query results etc.
+ struct Schema {
+  // column names, types, comments
+  1: list<FieldSchema> fieldSchemas,  // delimiters etc
+  2: map<string, string> properties
+ }
+ 
+ // Key-value store to be used with selected
+ // Metastore APIs (create, alter methods).
+ // The client can pass environment properties / configs that can be
+ // accessed in hooks.
+ struct EnvironmentContext {
+   1: map<string, string> properties
+ }
+ 
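
For example, a client can attach a property for downstream hooks like this; putToProperties is the generated helper for Thrift map fields, and the key shown is illustrative:

    import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

    class EnvCtxExample {
      static EnvironmentContext make() {
        EnvironmentContext ctx = new EnvironmentContext();
        ctx.putToProperties("DO_NOT_UPDATE_STATS", "true");  // illustrative key/value
        return ctx;
      }
    }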
+ struct PrimaryKeysRequest {
+   1: required string db_name,
+   2: required string tbl_name,
+   3: optional string catName
+ }
+ 
+ struct PrimaryKeysResponse {
+   1: required list<SQLPrimaryKey> primaryKeys
+ }
+ 
+ struct ForeignKeysRequest {
+   1: string parent_db_name,
+   2: string parent_tbl_name,
+   3: string foreign_db_name,
+   4: string foreign_tbl_name
+   5: optional string catName          // No cross catalog constraints
+ }
+ 
+ struct ForeignKeysResponse {
+   1: required list<SQLForeignKey> foreignKeys
+ }
+ 
+ struct UniqueConstraintsRequest {
+   1: required string catName,
+   2: required string db_name,
+   3: required string tbl_name,
+ }
+ 
+ struct UniqueConstraintsResponse {
+   1: required list<SQLUniqueConstraint> uniqueConstraints
+ }
+ 
+ struct NotNullConstraintsRequest {
+   1: required string catName,
+   2: required string db_name,
+   3: required string tbl_name,
+ }
+ 
+ struct NotNullConstraintsResponse {
+   1: required list<SQLNotNullConstraint> notNullConstraints
+ }
+ 
+ struct DefaultConstraintsRequest {
+   1: required string catName,
+   2: required string db_name,
+   3: required string tbl_name
+ }
+ 
+ struct DefaultConstraintsResponse {
+   1: required list<SQLDefaultConstraint> defaultConstraints
+ }
+ 
+ struct CheckConstraintsRequest {
+   1: required string catName,
+   2: required string db_name,
+   3: required string tbl_name
+ }
+ 
+ struct CheckConstraintsResponse {
+   1: required list<SQLCheckConstraint> checkConstraints
+ }
+ 
+ 
+ struct DropConstraintRequest {
+   1: required string dbname, 
+   2: required string tablename,
+   3: required string constraintname,
+   4: optional string catName
+ }
+ 
+ struct AddPrimaryKeyRequest {
+   1: required list<SQLPrimaryKey> primaryKeyCols
+ }
+ 
+ struct AddForeignKeyRequest {
+   1: required list<SQLForeignKey> foreignKeyCols
+ }
+ 
+ struct AddUniqueConstraintRequest {
+   1: required list<SQLUniqueConstraint> uniqueConstraintCols
+ }
+ 
+ struct AddNotNullConstraintRequest {
+   1: required list<SQLNotNullConstraint> notNullConstraintCols
+ }
+ 
+ struct AddDefaultConstraintRequest {
+   1: required list<SQLDefaultConstraint> defaultConstraintCols
+ }
+ 
+ struct AddCheckConstraintRequest {
+   1: required list<SQLCheckConstraint> checkConstraintCols
+ }
+ 
+ // Return type for get_partitions_by_expr
+ struct PartitionsByExprResult {
+   1: required list<Partition> partitions,
+   // Whether the result has any (currently, all) partitions which may or may not match
+   2: required bool hasUnknownPartitions
+ }
+ 
+ struct PartitionsByExprRequest {
+   1: required string dbName,
+   2: required string tblName,
+   3: required binary expr,
+   4: optional string defaultPartitionName,
+   5: optional i16 maxParts=-1
+   6: optional string catName
+ }
+ 
+ struct TableStatsResult {
 -  1: required list<ColumnStatisticsObj> tableStats
++  1: required list<ColumnStatisticsObj> tableStats,
++  2: optional bool isStatsCompliant
+ }
+ 
+ struct PartitionsStatsResult {
 -  1: required map<string, list<ColumnStatisticsObj>> partStats
++  1: required map<string, list<ColumnStatisticsObj>> partStats,
++  2: optional bool isStatsCompliant
+ }
+ 
+ struct TableStatsRequest {
+  1: required string dbName,
+  2: required string tblName,
+  3: required list<string> colNames,
 - 4: optional string catName
++ 4: optional string catName,
++ 5: optional i64 txnId=-1,            // transaction id of the query that sends this structure
++ 6: optional string validWriteIdList  // valid write id list for the table for which this struct is being sent
+ }
+ 
+ struct PartitionsStatsRequest {
+  1: required string dbName,
+  2: required string tblName,
+  3: required list<string> colNames,
+  4: required list<string> partNames,
 - 5: optional string catName
++ 5: optional string catName,
++ 6: optional i64 txnId=-1,           // transaction id of the query that sends this structure
++ 7: optional string validWriteIdList // valid write id list for the table for which this struct is being sent
+ }
+ 
+ // Return type for add_partitions_req
+ struct AddPartitionsResult {
+   1: optional list<Partition> partitions,
++  2: optional bool isStatsCompliant
+ }
+ 
+ // Request type for add_partitions_req
+ struct AddPartitionsRequest {
+   1: required string dbName,
+   2: required string tblName,
+   3: required list<Partition> parts,
+   4: required bool ifNotExists,
+   5: optional bool needResult=true,
 -  6: optional string catName
++  6: optional string catName,
++  7: optional i64 txnId=-1,
++  8: optional string validWriteIdList
+ }
+ 
+ // Return type for drop_partitions_req
+ struct DropPartitionsResult {
+   1: optional list<Partition> partitions,
+ }
+ 
+ struct DropPartitionsExpr {
+   1: required binary expr;
+   2: optional i32 partArchiveLevel;
+ }
+ 
+ union RequestPartsSpec {
+   1: list<string> names;
+   2: list<DropPartitionsExpr> exprs;
+ }
+ 
+ // Request type for drop_partitions_req
+ // TODO: we might want to add "bestEffort" flag; where a subset can fail
+ struct DropPartitionsRequest {
+   1: required string dbName,
+   2: required string tblName,
+   3: required RequestPartsSpec parts,
+   4: optional bool deleteData,
+   5: optional bool ifExists=true, // currently verified on client
+   6: optional bool ignoreProtection,
+   7: optional EnvironmentContext environmentContext,
+   8: optional bool needResult=true,
+   9: optional string catName
+ }
+ 
+ struct PartitionValuesRequest {
+   1: required string dbName,
+   2: required string tblName,
+   3: required list<FieldSchema> partitionKeys;
+   4: optional bool applyDistinct = true;
+   5: optional string filter;
+   6: optional list<FieldSchema> partitionOrder;
+   7: optional bool ascending = true;
+   8: optional i64 maxParts = -1;
+   9: optional string catName
+ }
+ 
+ struct PartitionValuesRow {
+   1: required list<string> row;
+ }
+ 
+ struct PartitionValuesResponse {
+   1: required list<PartitionValuesRow> partitionValues;
+ }
+ 
+ enum FunctionType {
+   JAVA = 1,
+ }
+ 
+ enum ResourceType {
+   JAR     = 1,
+   FILE    = 2,
+   ARCHIVE = 3,
+ }
+ 
+ struct ResourceUri {
+   1: ResourceType resourceType,
+   2: string       uri,
+ }
+ 
+ // User-defined function
+ struct Function {
+   1: string           functionName,
+   2: string           dbName,
+   3: string           className,
+   4: string           ownerName,
+   5: PrincipalType    ownerType,
+   6: i32              createTime,
+   7: FunctionType     functionType,
+   8: list<ResourceUri> resourceUris,
+   9: optional string  catName
+ }
+ 
+ // Structs for transaction and locks
+ struct TxnInfo {
+     1: required i64 id,
+     2: required TxnState state,
+     3: required string user,        // used in 'show transactions' to help admins find who has open transactions
+     4: required string hostname,    // used in 'show transactions' to help admins find who has open transactions
+     5: optional string agentInfo = "Unknown",
+     6: optional i32 heartbeatCount=0,
+     7: optional string metaInfo,
+     8: optional i64 startedTime,
+     9: optional i64 lastHeartbeatTime,
+ }
+ 
+ struct GetOpenTxnsInfoResponse {
+     1: required i64 txn_high_water_mark,
+     2: required list<TxnInfo> open_txns,
+ }
+ 
+ struct GetOpenTxnsResponse {
+     1: required i64 txn_high_water_mark,
+     2: required list<i64> open_txns,  // set<i64> changed to list<i64> since 3.0
+     3: optional i64 min_open_txn, //since 1.3,2.2
+     4: required binary abortedBits,   // since 3.0
+ }
+ 
+ struct OpenTxnRequest {
+     1: required i32 num_txns,
+     2: required string user,
+     3: required string hostname,
+     4: optional string agentInfo = "Unknown",
+     5: optional string replPolicy,
+     6: optional list<i64> replSrcTxnIds,
+ }
+ 
+ struct OpenTxnsResponse {
+     1: required list<i64> txn_ids,
+ }
+ 
+ struct AbortTxnRequest {
+     1: required i64 txnid,
+     2: optional string replPolicy,
+ }
+ 
+ struct AbortTxnsRequest {
+     1: required list<i64> txn_ids,
+ }
+ 
+ struct CommitTxnRequest {
+     1: required i64 txnid,
+     2: optional string replPolicy,
+     // Information related to write operations done in this transaction.
+     3: optional list<WriteEventInfo> writeEventInfos,
+ }
+ 
+ struct WriteEventInfo {
+     1: required i64    writeId,
+     2: required string database,
+     3: required string table,
+     4: required string files,
+     5: optional string partition,
+     6: optional string tableObj, // repl txn task does not need table object for commit
+     7: optional string partitionObj,
+ }
+ 
+ struct ReplTblWriteIdStateRequest {
+     1: required string validWriteIdlist,
+     2: required string user,
+     3: required string hostName,
+     4: required string dbName,
+     5: required string tableName,
+     6: optional list<string> partNames,
+ }
+ 
+ // Request msg to get the valid write ids list for the given list of tables wrt to input validTxnList
+ struct GetValidWriteIdsRequest {
+     1: required list<string> fullTableNames, // Full table names of format <db_name>.<table_name>
+     2: required string validTxnList, // Valid txn list string wrt the current txn of the caller
+ }
+ 
+ // Valid Write ID list of one table wrt the current txn
+ struct TableValidWriteIds {
+     1: required string fullTableName,  // Full table name of format <db_name>.<table_name>
+     2: required i64 writeIdHighWaterMark, // The highest write id valid for this table wrt the given txn
+     3: required list<i64> invalidWriteIds, // List of open and aborted write ids in the table
+     4: optional i64 minOpenWriteId, // Minimum write id which maps to an open txn
+     5: required binary abortedBits, // Bit array to identify the aborted write ids in the invalidWriteIds list
+ }
+ 
+ // Valid Write ID list for all the input tables wrt the current txn
+ struct GetValidWriteIdsResponse {
+     1: required list<TableValidWriteIds> tblValidWriteIds,
+ }
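
For illustration (this sketch is not part of the patch), a client could drive the
two structs above through the Thrift-generated Java beans. It assumes the standard
Thrift 0.9.3 javabean generation (required-field constructors, get/set accessors)
and a connected `client` implementing ThriftHiveMetastore.Iface; the table names
are made up:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.*;

    class ValidWriteIdsSketch {
      static void printWriteIds(ThriftHiveMetastore.Iface client,
          String validTxnList) throws Exception {
        GetValidWriteIdsRequest rqst = new GetValidWriteIdsRequest(
            Arrays.asList("default.t1", "default.t2"), // <db_name>.<table_name>
            validTxnList);                             // caller's txn snapshot
        GetValidWriteIdsResponse resp = client.get_valid_write_ids(rqst);
        for (TableValidWriteIds ids : resp.getTblValidWriteIds()) {
          // Anything above the high water mark, or listed in invalidWriteIds,
          // is not readable by this transaction.
          System.out.println(ids.getFullTableName() + " HWM="
              + ids.getWriteIdHighWaterMark());
        }
      }
    }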
+ 
+ // Request msg to allocate table write ids for the given list of txns
+ struct AllocateTableWriteIdsRequest {
+     1: required string dbName,
+     2: required string tableName,
+     // Either txnIds or replPolicy+srcTxnToWriteIdList can exist in a call. txnIds is used by normal flow and
+     // replPolicy+srcTxnToWriteIdList is used by replication task.
+     3: optional list<i64> txnIds,
+     4: optional string replPolicy,
+     // The list is assumed to be sorted by both txnids and write ids. The write id list is assumed to be contiguous.
+     5: optional list<TxnToWriteId> srcTxnToWriteIdList,
+ }
+ 
+ // Map for allocated write id against the txn for which it is allocated
+ struct TxnToWriteId {
+     1: required i64 txnId,
+     2: required i64 writeId,
+ }
+ 
+ struct AllocateTableWriteIdsResponse {
+     1: required list<TxnToWriteId> txnToWriteIds,
+ }
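
A hedged Java sketch of the normal (non-replication) allocation flow described
above; `client`, the db/table names, and the wrapping method are illustrative
assumptions, not code from this commit:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.*;

    class AllocateWriteIdSketch {
      static long writeIdFor(ThriftHiveMetastore.Iface client, long txnId)
          throws Exception {
        // Normal flow: set txnIds; leave replPolicy/srcTxnToWriteIdList unset.
        AllocateTableWriteIdsRequest rqst =
            new AllocateTableWriteIdsRequest("default", "acid_tbl");
        rqst.setTxnIds(Arrays.asList(txnId));
        AllocateTableWriteIdsResponse resp =
            client.allocate_table_write_ids(rqst);
        return resp.getTxnToWriteIds().get(0).getWriteId();
      }
    }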
+ 
+ struct LockComponent {
+     1: required LockType type,
+     2: required LockLevel level,
+     3: required string dbname,
+     4: optional string tablename,
+     5: optional string partitionname,
+     6: optional DataOperationType operationType = DataOperationType.UNSET,
+     7: optional bool isTransactional = false,
+     8: optional bool isDynamicPartitionWrite = false
+ }
+ 
+ struct LockRequest {
+     1: required list<LockComponent> component,
+     2: optional i64 txnid,
+     3: required string user,     // used in 'show locks' to help admins find who has open locks
+     4: required string hostname, // used in 'show locks' to help admins find who has open locks
+     5: optional string agentInfo = "Unknown",
+ }
+ 
+ struct LockResponse {
+     1: required i64 lockid,
+     2: required LockState state,
+ }
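
The lock structs can be exercised roughly as follows. A non-authoritative Java
sketch, assuming the generated beans and an already-open transaction `txnId`;
LockType, LockLevel, LockState and DataOperationType are the enums defined
earlier in this file:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.*;

    class LockSketch {
      static LockResponse readLock(ThriftHiveMetastore.Iface client, long txnId)
          throws Exception {
        LockComponent comp =
            new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "default");
        comp.setTablename("acid_tbl");
        comp.setOperationType(DataOperationType.SELECT);
        LockRequest rqst = new LockRequest(Arrays.asList(comp),
            "hive", "worker-1.example.com"); // surfaced by 'show locks'
        rqst.setTxnid(txnId);
        // Returned state is ACQUIRED, or WAITING (then poll with check_lock).
        return client.lock(rqst);
      }
    }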
+ 
+ struct CheckLockRequest {
+     1: required i64 lockid,
+     2: optional i64 txnid,
+     3: optional i64 elapsed_ms,
+ }
+ 
+ struct UnlockRequest {
+     1: required i64 lockid,
+ }
+ 
+ struct ShowLocksRequest {
+     1: optional string dbname,
+     2: optional string tablename,
+     3: optional string partname,
+     4: optional bool isExtended=false,
+ }
+ 
+ struct ShowLocksResponseElement {
+     1: required i64 lockid,
+     2: required string dbname,
+     3: optional string tablename,
+     4: optional string partname,
+     5: required LockState state,
+     6: required LockType type,
+     7: optional i64 txnid,
+     8: required i64 lastheartbeat,
+     9: optional i64 acquiredat,
+     10: required string user,
+     11: required string hostname,
+     12: optional i32 heartbeatCount = 0,
+     13: optional string agentInfo,
+     14: optional i64 blockedByExtId,
+     15: optional i64 blockedByIntId,
+     16: optional i64 lockIdInternal,
+ }
+ 
+ struct ShowLocksResponse {
+     1: list<ShowLocksResponseElement> locks,
+ }
+ 
+ struct HeartbeatRequest {
+     1: optional i64 lockid,
+     2: optional i64 txnid
+ }
+ 
+ struct HeartbeatTxnRangeRequest {
+     1: required i64 min,
+     2: required i64 max
+ }
+ 
+ struct HeartbeatTxnRangeResponse {
+     1: required set<i64> aborted,
+     2: required set<i64> nosuch
+ }
+ 
+ struct CompactionRequest {
+     1: required string dbname,
+     2: required string tablename,
+     3: optional string partitionname,
+     4: required CompactionType type,
+     5: optional string runas,
+     6: optional map<string, string> properties
+ }
+ 
+ struct CompactionResponse {
+     1: required i64 id,
+     2: required string state,
+     3: required bool accepted
+ }
+ 
+ struct ShowCompactRequest {
+ }
+ 
+ struct ShowCompactResponseElement {
+     1: required string dbname,
+     2: required string tablename,
+     3: optional string partitionname,
+     4: required CompactionType type,
+     5: required string state,
+     6: optional string workerid,
+     7: optional i64 start,
+     8: optional string runAs,
+     9: optional i64 hightestTxnId, // Highest Txn ID handled by this compaction
+     10: optional string metaInfo,
+     11: optional i64 endTime,
+     12: optional string hadoopJobId = "None",
+     13: optional i64 id,
+ }
+ 
+ struct ShowCompactResponse {
+     1: required list<ShowCompactResponseElement> compacts,
+ }
+ 
+ struct AddDynamicPartitions {
+     1: required i64 txnid,
+     2: required i64 writeid,
+     3: required string dbname,
+     4: required string tablename,
+     5: required list<string> partitionnames,
+     6: optional DataOperationType operationType = DataOperationType.UNSET
+ }
+ 
+ struct BasicTxnInfo {
+     1: required bool isnull,
+     2: optional i64 time,
+     3: optional i64 txnid,
+     4: optional string dbname,
+     5: optional string tablename,
+     6: optional string partitionname
+ }
+ 
+ struct CreationMetadata {
+     1: required string catName
+     2: required string dbName,
+     3: required string tblName,
+     4: required set<string> tablesUsed,
+     5: optional string validTxnList,
+ }
+ 
+ struct NotificationEventRequest {
+     1: required i64 lastEvent,
+     2: optional i32 maxEvents,
+ }
+ 
+ struct NotificationEvent {
+     1: required i64 eventId,
+     2: required i32 eventTime,
+     3: required string eventType,
+     4: optional string dbName,
+     5: optional string tableName,
+     6: required string message,
+     7: optional string messageFormat,
+     8: optional string catName
+ }
+ 
+ struct NotificationEventResponse {
+     1: required list<NotificationEvent> events,
+ }
+ 
+ struct CurrentNotificationEventId {
+     1: required i64 eventId,
+ }
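
A typical consumer polls incrementally: remember the last event id seen, then
ask for everything newer. A rough Java sketch under the same generated-bean
assumptions (the batch size of 100 is arbitrary):

    import org.apache.hadoop.hive.metastore.api.*;

    class NotificationPollSketch {
      static long poll(ThriftHiveMetastore.Iface client, long lastSeen)
          throws Exception {
        NotificationEventRequest rqst = new NotificationEventRequest(lastSeen);
        rqst.setMaxEvents(100); // arbitrary batch size
        for (NotificationEvent e :
            client.get_next_notification(rqst).getEvents()) {
          System.out.println(e.getEventId() + " " + e.getEventType());
          lastSeen = e.getEventId(); // next poll starts after this id
        }
        return lastSeen;
      }
    }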
+ 
+ struct NotificationEventsCountRequest {
+     1: required i64 fromEventId,
+     2: required string dbName,
+     3: optional string catName
+ }
+ 
+ struct NotificationEventsCountResponse {
+     1: required i64 eventsCount,
+ }
+ 
+ struct InsertEventRequestData {
+     1: optional bool replace,
+     2: required list<string> filesAdded,
+     // Checksum of files (hex string of checksum byte payload)
+     3: optional list<string> filesAddedChecksum,
+     // Used by ACID operations to create the subdirectory
+     4: optional list<string> subDirectoryList,
+ }
+ 
+ union FireEventRequestData {
+     1: InsertEventRequestData insertData
+ }
+ 
+ struct FireEventRequest {
+     1: required bool successful,
+     2: required FireEventRequestData data
+     // dbname, tablename, and partition vals are included as optional in the top level event rather than placed in each type of
+     // subevent as I assume they'll be used across most event types.
+     3: optional string dbName,
+     4: optional string tableName,
+     5: optional list<string> partitionVals,
+     6: optional string catName,
+ }
+ 
+ struct FireEventResponse {
+     // NOP for now, this is just a placeholder for future responses
+ }
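
To show how the union plugs into the request, here is an illustrative
(unverified) Java fragment that fires an insert event; the HDFS path and
db/table names are placeholders:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.*;

    class FireInsertEventSketch {
      static void fire(ThriftHiveMetastore.Iface client) throws Exception {
        InsertEventRequestData insert = new InsertEventRequestData(
            Arrays.asList("hdfs://nn:8020/warehouse/t/ds=1/000000_0"));
        insert.setReplace(false);
        FireEventRequestData data = new FireEventRequestData();
        data.setInsertData(insert); // select the union branch
        FireEventRequest rqst = new FireEventRequest(true, data);
        rqst.setDbName("default");                 // top-level context shared
        rqst.setTableName("t");                    // by all sub-event types
        rqst.setPartitionVals(Arrays.asList("1"));
        client.fire_listener_event(rqst);
      }
    }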
+ 
+ struct WriteNotificationLogRequest {
+     1: required i64 txnId,
+     2: required i64 writeId,
+     3: required string db,
+     4: required string table,
+     5: required InsertEventRequestData fileInfo,
+     6: optional list<string> partitionVals,
+ }
+ 
+ struct WriteNotificationLogResponse {
+     // NOP for now, this is just a placeholder for future responses
+ }
+ 
+ struct MetadataPpdResult {
+   1: optional binary metadata,
+   2: optional binary includeBitset
+ }
+ 
+ // Return type for get_file_metadata_by_expr
+ struct GetFileMetadataByExprResult {
+   1: required map<i64, MetadataPpdResult> metadata,
+   2: required bool isSupported
+ }
+ 
+ enum FileMetadataExprType {
+   ORC_SARG = 1
+ }
+ 
+ 
+ // Request type for get_file_metadata_by_expr
+ struct GetFileMetadataByExprRequest {
+   1: required list<i64> fileIds,
+   2: required binary expr,
+   3: optional bool doGetFooters,
+   4: optional FileMetadataExprType type
+ }
+ 
+ // Return type for get_file_metadata
+ struct GetFileMetadataResult {
+   1: required map<i64, binary> metadata,
+   2: required bool isSupported
+ }
+ 
+ // Request type for get_file_metadata
+ struct GetFileMetadataRequest {
+   1: required list<i64> fileIds
+ }
+ 
+ // Return type for put_file_metadata
+ struct PutFileMetadataResult {
+ }
+ 
+ // Request type for put_file_metadata
+ struct PutFileMetadataRequest {
+   1: required list<i64> fileIds,
+   2: required list<binary> metadata,
+   3: optional FileMetadataExprType type
+ }
+ 
+ // Return type for clear_file_metadata
+ struct ClearFileMetadataResult {
+ }
+ 
+ // Request type for clear_file_metadata
+ struct ClearFileMetadataRequest {
+   1: required list<i64> fileIds
+ }
+ 
+ // Return type for cache_file_metadata
+ struct CacheFileMetadataResult {
+   1: required bool isSupported
+ }
+ 
+ // Request type for cache_file_metadata
+ struct CacheFileMetadataRequest {
+   1: required string dbName,
+   2: required string tblName,
+   3: optional string partName,
+   4: optional bool isAllParts
+ }
+ 
+ struct GetAllFunctionsResponse {
+   1: optional list<Function> functions
+ }
+ 
+ enum ClientCapability {
+   TEST_CAPABILITY = 1,
+   INSERT_ONLY_TABLES = 2
+ }
+ 
+ 
+ struct ClientCapabilities {
+   1: required list<ClientCapability> values
+ }
+ 
+ struct GetTableRequest {
+   1: required string dbName,
+   2: required string tblName,
+   3: optional ClientCapabilities capabilities,
 -  4: optional string catName
++  4: optional string catName,
++  5: optional i64 txnId=-1,
++  6: optional string validWriteIdList
+ }
+ 
+ struct GetTableResult {
 -  1: required Table table
++  1: required Table table,
++  2: optional bool isStatsCompliant
+ }
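
A speculative Java sketch of a caller using the two fields added here (txnId,
validWriteIdList) together with the new isStatsCompliant flag in the result;
setter/getter names assume ordinary Thrift bean generation and the db/table
names are placeholders:

    import org.apache.hadoop.hive.metastore.api.*;

    class GetTableReqSketch {
      static Table fetch(ThriftHiveMetastore.Iface client, long txnId,
          String validWriteIdList) throws Exception {
        GetTableRequest rqst = new GetTableRequest("default", "acid_tbl");
        rqst.setTxnId(txnId);                       // new in this change
        rqst.setValidWriteIdList(validWriteIdList); // ditto
        GetTableResult res = client.get_table_req(rqst);
        boolean compliant =
            res.isSetIsStatsCompliant() && res.isIsStatsCompliant();
        System.out.println("stats compliant: " + compliant);
        return res.getTable();
      }
    }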
+ 
+ struct GetTablesRequest {
+   1: required string dbName,
+   2: optional list<string> tblNames,
+   3: optional ClientCapabilities capabilities,
+   4: optional string catName
+ }
+ 
+ struct GetTablesResult {
+   1: required list<Table> tables
+ }
+ 
+ // Request type for cm_recycle
+ struct CmRecycleRequest {
+   1: required string dataPath,
+   2: required bool purge
+ }
+ 
+ // Response type for cm_recycle
+ struct CmRecycleResponse {
+ }
+ 
+ struct TableMeta {
+   1: required string dbName;
+   2: required string tableName;
+   3: required string tableType;
+   4: optional string comments;
+   5: optional string catName;
+ }
+ 
+ struct Materialization {
+   1: required set<string> tablesUsed;
+   2: optional string validTxnList;
+   3: optional i64 invalidationTime;
+   4: optional bool sourceTablesUpdateDeleteModified;
+ }
+ 
+ // Data types for workload management.
+ 
+ enum WMResourcePlanStatus {
+   ACTIVE = 1,
+   ENABLED = 2,
+   DISABLED = 3
+ }
+ 
+ enum  WMPoolSchedulingPolicy {
+   FAIR = 1,
+   FIFO = 2
+ }
+ 
+ struct WMResourcePlan {
+   1: required string name;
+   2: optional WMResourcePlanStatus status;
+   3: optional i32 queryParallelism;
+   4: optional string defaultPoolPath;
+ }
+ 
+ struct WMNullableResourcePlan {
+   1: optional string name;
+   2: optional WMResourcePlanStatus status;
+   4: optional i32 queryParallelism;
+   5: optional bool isSetQueryParallelism;
+   6: optional string defaultPoolPath;
+   7: optional bool isSetDefaultPoolPath;
+ }
+ 
+ struct WMPool {
+   1: required string resourcePlanName;
+   2: required string poolPath;
+   3: optional double allocFraction;
+   4: optional i32 queryParallelism;
+   5: optional string schedulingPolicy;
+ }
+ 
+ 
+ struct WMNullablePool {
+   1: required string resourcePlanName;
+   2: required string poolPath;
+   3: optional double allocFraction;
+   4: optional i32 queryParallelism;
+   5: optional string schedulingPolicy;
+   6: optional bool isSetSchedulingPolicy;
+ }
+ 
+ struct WMTrigger {
+   1: required string resourcePlanName;
+   2: required string triggerName;
+   3: optional string triggerExpression;
+   4: optional string actionExpression;
+   5: optional bool isInUnmanaged;
+ }
+ 
+ struct WMMapping {
+   1: required string resourcePlanName;
+   2: required string entityType;
+   3: required string entityName;
+   4: optional string poolPath;
+   5: optional i32 ordering;
+ }
+ 
+ struct WMPoolTrigger {
+   1: required string pool;
+   2: required string trigger;
+ }
+ 
+ struct WMFullResourcePlan {
+   1: required WMResourcePlan plan;
+   2: required list<WMPool> pools;
+   3: optional list<WMMapping> mappings;
+   4: optional list<WMTrigger> triggers;
+   5: optional list<WMPoolTrigger> poolTriggers;
+ }
+ 
+ // Request response for workload management API's.
+ 
+ struct WMCreateResourcePlanRequest {
+   1: optional WMResourcePlan resourcePlan;
+   2: optional string copyFrom;
+ }
+ 
+ struct WMCreateResourcePlanResponse {
+ }
+ 
+ struct WMGetActiveResourcePlanRequest {
+ }
+ 
+ struct WMGetActiveResourcePlanResponse {
+   1: optional WMFullResourcePlan resourcePlan;
+ }
+ 
+ struct WMGetResourcePlanRequest {
+   1: optional string resourcePlanName;
+ }
+ 
+ struct WMGetResourcePlanResponse {
+   1: optional WMFullResourcePlan resourcePlan;
+ }
+ 
+ struct WMGetAllResourcePlanRequest {
+ }
+ 
+ struct WMGetAllResourcePlanResponse {
+   1: optional list<WMResourcePlan> resourcePlans;
+ }
+ 
+ struct WMAlterResourcePlanRequest {
+   1: optional string resourcePlanName;
+   2: optional WMNullableResourcePlan resourcePlan;
+   3: optional bool isEnableAndActivate;
+   4: optional bool isForceDeactivate;
+   5: optional bool isReplace;
+ }
+ 
+ struct WMAlterResourcePlanResponse {
+   1: optional WMFullResourcePlan fullResourcePlan;
+ }
+ 
+ struct WMValidateResourcePlanRequest {
+   1: optional string resourcePlanName;
+ }
+ 
+ struct WMValidateResourcePlanResponse {
+   1: optional list<string> errors;
+   2: optional list<string> warnings;
+ }
+ 
+ struct WMDropResourcePlanRequest {
+   1: optional string resourcePlanName;
+ }
+ 
+ struct WMDropResourcePlanResponse {
+ }
+ 
+ struct WMCreateTriggerRequest {
+   1: optional WMTrigger trigger;
+ }
+ 
+ struct WMCreateTriggerResponse {
+ }
+ 
+ struct WMAlterTriggerRequest {
+   1: optional WMTrigger trigger;
+ }
+ 
+ struct WMAlterTriggerResponse {
+ }
+ 
+ struct WMDropTriggerRequest {
+   1: optional string resourcePlanName;
+   2: optional string triggerName;
+ }
+ 
+ struct WMDropTriggerResponse {
+ }
+ 
+ struct WMGetTriggersForResourePlanRequest {
+   1: optional string resourcePlanName;
+ }
+ 
+ struct WMGetTriggersForResourePlanResponse {
+   1: optional list<WMTrigger> triggers;
+ }
+ 
+ struct WMCreatePoolRequest {
+   1: optional WMPool pool;
+ }
+ 
+ struct WMCreatePoolResponse {
+ }
+ 
+ struct WMAlterPoolRequest {
+   1: optional WMNullablePool pool;
+   2: optional string poolPath;
+ }
+ 
+ struct WMAlterPoolResponse {
+ }
+ 
+ struct WMDropPoolRequest {
+   1: optional string resourcePlanName;
+   2: optional string poolPath;
+ }
+ 
+ struct WMDropPoolResponse {
+ }
+ 
+ struct WMCreateOrUpdateMappingRequest {
+   1: optional WMMapping mapping;
+   2: optional bool update;
+ }
+ 
+ struct WMCreateOrUpdateMappingResponse {
+ }
+ 
+ struct WMDropMappingRequest {
+   1: optional WMMapping mapping;
+ }
+ 
+ struct WMDropMappingResponse {
+ }
+ 
+ struct WMCreateOrDropTriggerToPoolMappingRequest {
+   1: optional string resourcePlanName;
+   2: optional string triggerName;
+   3: optional string poolPath;
+   4: optional bool drop;
+ }
+ 
+ struct WMCreateOrDropTriggerToPoolMappingResponse {
+ }
+ 
+ // Schema objects
+ // Schema is already taken, so for the moment I'm calling it an ISchema for Independent Schema
+ struct ISchema {
+   1: SchemaType schemaType,
+   2: string name,
+   3: string catName,
+   4: string dbName,
+   5: SchemaCompatibility compatibility,
+   6: SchemaValidation validationLevel,
+   7: bool canEvolve,
+   8: optional string schemaGroup,
+   9: optional string description
+ }
+ 
+ struct ISchemaName {
+   1: string catName,
+   2: string dbName,
+   3: string schemaName
+ }
+ 
+ struct AlterISchemaRequest {
+   1: ISchemaName name,
+   3: ISchema newSchema
+ }
+ 
+ struct SchemaVersion {
+   1:  ISchemaName schema,
+   2:  i32 version,
+   3:  i64 createdAt,
+   4:  list<FieldSchema> cols,
+   5:  optional SchemaVersionState state,
+   6:  optional string description,
+   7:  optional string schemaText,
+   8:  optional string fingerprint,
+   9:  optional string name,
+   10: optional SerDeInfo serDe
+ }
+ 
+ struct SchemaVersionDescriptor {
+   1: ISchemaName schema,
+   2: i32 version
+ }
+ 
+ struct FindSchemasByColsRqst {
+   1: optional string colName,
+   2: optional string colNamespace,
+   3: optional string type
+ }
+ 
+ struct FindSchemasByColsResp {
+   1: list<SchemaVersionDescriptor> schemaVersions
+ }
+ 
+ struct MapSchemaVersionToSerdeRequest {
+   1: SchemaVersionDescriptor schemaVersion,
+   2: string serdeName
+ }
+ 
+ struct SetSchemaVersionStateRequest {
+   1: SchemaVersionDescriptor schemaVersion,
+   2: SchemaVersionState state
+ }
+ 
+ struct GetSerdeRequest {
+   1: string serdeName
+ }
+ 
+ struct RuntimeStat {
+   1: optional i32 createTime,
+   2: required i32 weight,
+   3: required binary payload
+ }
+ 
+ struct GetRuntimeStatsRequest {
+   1: required i32 maxWeight,
+   2: required i32 maxCreateTime
+ }
+ 
++struct AlterPartitionsRequest {
++  1: required string dbName,
++  2: required string tableName,
++  3: required list<Partition> partitions,
++  4: required EnvironmentContext environmentContext,
++  5: optional i64 txnId=-1,
++  6: optional i64 writeId=-1,
++  7: optional string validWriteIdList
++}
++
++struct AlterPartitionsResponse {
++}
++
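A possible caller of the new request/response pair, sketched in Java against
the generated beans; everything except the struct and service method names is
an assumption:

    import java.util.List;
    import org.apache.hadoop.hive.metastore.api.*;

    class AlterPartitionsSketch {
      static void alter(ThriftHiveMetastore.Iface client, List<Partition> parts,
          long txnId, long writeId, String validWriteIdList) throws Exception {
        AlterPartitionsRequest rqst = new AlterPartitionsRequest(
            "default", "acid_tbl", parts, new EnvironmentContext());
        rqst.setTxnId(txnId);     // defaults to -1 when not in a txn
        rqst.setWriteId(writeId);
        rqst.setValidWriteIdList(validWriteIdList);
        client.alter_partitions_with_environment_context_req(rqst);
      }
    }
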
+ // Exceptions.
+ 
+ exception MetaException {
+   1: string message
+ }
+ 
+ exception UnknownTableException {
+   1: string message
+ }
+ 
+ exception UnknownDBException {
+   1: string message
+ }
+ 
+ exception AlreadyExistsException {
+   1: string message
+ }
+ 
+ exception InvalidPartitionException {
+   1: string message
+ }
+ 
+ exception UnknownPartitionException {
+   1: string message
+ }
+ 
+ exception InvalidObjectException {
+   1: string message
+ }
+ 
+ exception NoSuchObjectException {
+   1: string message
+ }
+ 
+ exception InvalidOperationException {
+   1: string message
+ }
+ 
+ exception ConfigValSecurityException {
+   1: string message
+ }
+ 
+ exception InvalidInputException {
+   1: string message
+ }
+ 
+ // Transaction and lock exceptions
+ exception NoSuchTxnException {
+     1: string message
+ }
+ 
+ exception TxnAbortedException {
+     1: string message
+ }
+ 
+ exception TxnOpenException {
+     1: string message
+ }
+ 
+ exception NoSuchLockException {
+     1: string message
+ }
+ 
+ /**
+ * This interface is live.
+ */
+ service ThriftHiveMetastore extends fb303.FacebookService
+ {
+   string getMetaConf(1:string key) throws(1:MetaException o1)
+   void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1)
+ 
+   void create_catalog(1: CreateCatalogRequest catalog) throws (1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3: MetaException o3)
+   void alter_catalog(1: AlterCatalogRequest rqst) throws (1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+   GetCatalogResponse get_catalog(1: GetCatalogRequest catName) throws (1:NoSuchObjectException o1, 2:MetaException o2)
+   GetCatalogsResponse get_catalogs() throws (1:MetaException o1)
+   void drop_catalog(1: DropCatalogRequest catName) throws (1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ 
+   void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+   Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+   list<string> get_databases(1:string pattern) throws(1:MetaException o1)
+   list<string> get_all_databases() throws(1:MetaException o1)
+   void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   // returns the type with the given name (make separate calls for the dependent types if needed)
+   Type get_type(1:string name)  throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+   bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   map<string, Type> get_type_all(1:string name)
+                                 throws(1:MetaException o2)
+ 
+   // Gets a list of FieldSchemas describing the columns of a particular table
+   list<FieldSchema> get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3),
+   list<FieldSchema> get_fields_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+ 
+   // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table
+   list<FieldSchema> get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+   list<FieldSchema> get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3)
+ 
+   // create a Hive table. The following fields must be set:
+   // tableName
+   // database        (only 'default' for now until Hive QL supports databases)
+   // owner           (not needed, but good to have for tracking purposes)
+   // sd.cols         (list of field schemas)
+   // sd.inputFormat  (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat)
+   // sd.outputFormat (SequenceFileOutputFormat (binary) or TextOutputFormat)
+   // sd.serdeInfo.serializationLib (SerDe class name, e.g. org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe)
+   // * See notes on DDL_TIME
+   void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4)
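
Putting the checklist above into code, a minimal Java fragment (illustrative,
not from this commit) that fills the listed fields and creates a plain text
table; the format and SerDe class names are the stock Hadoop/Hive ones:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.*;

    class CreateTableSketch {
      static void create(ThriftHiveMetastore.Iface client) throws Exception {
        StorageDescriptor sd = new StorageDescriptor();
        sd.setCols(Arrays.asList(new FieldSchema("id", "int", null)));
        sd.setInputFormat("org.apache.hadoop.mapred.TextInputFormat");
        sd.setOutputFormat(
            "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat");
        SerDeInfo serde = new SerDeInfo();
        serde.setSerializationLib(
            "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe");
        sd.setSerdeInfo(serde);
        Table tbl = new Table();
        tbl.setDbName("default");
        tbl.setTableName("t");
        tbl.setOwner("hive");  // good to have for tracking purposes
        tbl.setSd(sd);
        client.create_table(tbl);
      }
    }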
+   void create_table_with_environment_context(1:Table tbl,
+       2:EnvironmentContext environment_context)
+       throws (1:AlreadyExistsException o1,
+               2:InvalidObjectException o2, 3:MetaException o3,
+               4:NoSuchObjectException o4)
+   void create_table_with_constraints(1:Table tbl, 2: list<SQLPrimaryKey> primaryKeys, 3: list<SQLForeignKey> foreignKeys,
+   4: list<SQLUniqueConstraint> uniqueConstraints, 5: list<SQLNotNullConstraint> notNullConstraints,
+   6: list<SQLDefaultConstraint> defaultConstraints, 7: list<SQLCheckConstraint> checkConstraints)
+       throws (1:AlreadyExistsException o1,
+               2:InvalidObjectException o2, 3:MetaException o3,
+               4:NoSuchObjectException o4)
+   void drop_constraint(1:DropConstraintRequest req)
+       throws(1:NoSuchObjectException o1, 2:MetaException o3)
+   void add_primary_key(1:AddPrimaryKeyRequest req)
+       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   void add_foreign_key(1:AddForeignKeyRequest req)
+       throws(1:NoSuchObjectException o1, 2:MetaException o2)  
+   void add_unique_constraint(1:AddUniqueConstraintRequest req)
+       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   void add_not_null_constraint(1:AddNotNullConstraintRequest req)
+       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   void add_default_constraint(1:AddDefaultConstraintRequest req)
+       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   void add_check_constraint(1:AddCheckConstraintRequest req)
+       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   // drops the table and all the partitions associated with it if the table has partitions
+   // delete data (including partitions) if deleteData is set to true
+   void drop_table(1:string dbname, 2:string name, 3:bool deleteData)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o3)
+   void drop_table_with_environment_context(1:string dbname, 2:string name, 3:bool deleteData,
+       4:EnvironmentContext environment_context)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o3)
+   void truncate_table(1:string dbName, 2:string tableName, 3:list<string> partNames)
+                           throws(1:MetaException o1)
+   list<string> get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1)
+   list<string> get_tables_by_type(1: string db_name, 2: string pattern, 3: string tableType) throws (1: MetaException o1)
+   list<string> get_materialized_views_for_rewriting(1: string db_name) throws (1: MetaException o1)
+   list<TableMeta> get_table_meta(1: string db_patterns, 2: string tbl_patterns, 3: list<string> tbl_types)
+                        throws (1: MetaException o1)
+   list<string> get_all_tables(1: string db_name) throws (1: MetaException o1)
+ 
+   Table get_table(1:string dbname, 2:string tbl_name)
+                        throws (1:MetaException o1, 2:NoSuchObjectException o2)
+   list<Table> get_table_objects_by_name(1:string dbname, 2:list<string> tbl_names)
+   GetTableResult get_table_req(1:GetTableRequest req) throws (1:MetaException o1, 2:NoSuchObjectException o2)
+   GetTablesResult get_table_objects_by_name_req(1:GetTablesRequest req)
+ 				   throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+   map<string, Materialization> get_materialization_invalidation_info(1:string dbname, 2:list<string> tbl_names)
+ 				   throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+   void update_creation_metadata(1: string catName, 2:string dbname, 3:string tbl_name, 4:CreationMetadata creation_metadata)
+                    throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
+ 
+   // Get a list of table names that match a filter.
+   // The filter operators are LIKE, <, <=, >, >=, =, <>
+   //
+   // In the filter statement, values interpreted as strings must be enclosed in quotes,
+   // while values interpreted as integers should not be.  Strings and integers are the only
+   // supported value types.
+   //
+   // The currently supported key names in the filter are:
+   // Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name
+   //   and supports all filter operators
+   // Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times
+   //   and supports all filter operators except LIKE
+   // Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values
+   //   and only supports the filter operators = and <>.
+   //   Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement.
+   //   For example, to filter on parameter keys called "retention", the key name in the filter
+   //   statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention"
+   //   Also, = and <> only work for keys that exist
+   //   in the tables. E.g., if you are looking for tables where key1 <> value, it will only
+   //   look at tables that have a value for the parameter key1.
+   // Some example filter statements include:
+   // filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " +
+   //   Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0";
+   // filter = Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " +
+   //   Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\""
+   // @param dbName
+   //          The name of the database from which you will retrieve the table names
+   // @param filterType
+   //          The type of filter
+   // @param filter
+   //          The filter string
+   // @param max_tables
+   //          The maximum number of tables returned
+   // @return  A list of table names that match the desired filter
+   list<string> get_table_names_by_filter(1:string dbname, 2:string filter, 3:i16 max_tables=-1)
+                        throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3)
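
The filter examples above translate into a client call like the following Java
sketch; hive_metastoreConstants is the class Thrift generates from this file's
const declarations, and the db name is a placeholder:

    import org.apache.hadoop.hive.metastore.api.*;

    class TableFilterSketch {
      static void listTestOwned(ThriftHiveMetastore.Iface client)
          throws Exception {
        String filter = hive_metastoreConstants.HIVE_FILTER_FIELD_OWNER
            + " like \".*test.*\"";
        // -1 = no cap on the number of table names returned
        for (String name :
            client.get_table_names_by_filter("default", filter, (short) -1)) {
          System.out.println(name);
        }
      }
    }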
+ 
+   // alter table applies only to future partitions, not to existing partitions
+   // * See notes on DDL_TIME
+   void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl)
+                        throws (1:InvalidOperationException o1, 2:MetaException o2)
+   void alter_table_with_environment_context(1:string dbname, 2:string tbl_name,
+       3:Table new_tbl, 4:EnvironmentContext environment_context)
+       throws (1:InvalidOperationException o1, 2:MetaException o2)
+   // alter table not only applies to future partitions but also cascades to existing partitions
+   void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade)
+                        throws (1:InvalidOperationException o1, 2:MetaException o2)
+   // the following applies to only tables that have partitions
+   // * See notes on DDL_TIME
+   Partition add_partition(1:Partition new_part)
+                        throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   Partition add_partition_with_environment_context(1:Partition new_part,
+       2:EnvironmentContext environment_context)
+       throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2,
+       3:MetaException o3)
+   i32 add_partitions(1:list<Partition> new_parts)
+                        throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   i32 add_partitions_pspec(1:list<PartitionSpec> new_parts)
+                        throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   Partition append_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
+                        throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   AddPartitionsResult add_partitions_req(1:AddPartitionsRequest request)
+                        throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   Partition append_partition_with_environment_context(1:string db_name, 2:string tbl_name,
+       3:list<string> part_vals, 4:EnvironmentContext environment_context)
+                        throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name)
+                        throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   Partition append_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
+       3:string part_name, 4:EnvironmentContext environment_context)
+                        throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+   bool drop_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:bool deleteData)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   bool drop_partition_with_environment_context(1:string db_name, 2:string tbl_name,
+       3:list<string> part_vals, 4:bool deleteData, 5:EnvironmentContext environment_context)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   bool drop_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name,
+       3:string part_name, 4:bool deleteData, 5:EnvironmentContext environment_context)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   DropPartitionsResult drop_partitions_req(1: DropPartitionsRequest req)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   Partition get_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   Partition exchange_partition(1:map<string, string> partitionSpecs, 2:string source_db,
+       3:string source_table_name, 4:string dest_db, 5:string dest_table_name)
+       throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
+       4:InvalidInputException o4)
+ 
+   list<Partition> exchange_partitions(1:map<string, string> partitionSpecs, 2:string source_db,
+       3:string source_table_name, 4:string dest_db, 5:string dest_table_name)
+       throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3,
+       4:InvalidInputException o4)
+ 
+   Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals,
+       4: string user_name, 5: list<string> group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   // returns all the partitions for this table in reverse chronological order.
+   // If max parts is given then it will return only that many.
+   list<Partition> get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   list<Partition> get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1,
+      4: string user_name, 5: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   list<PartitionSpec> get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   list<string> get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1)
+                        throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   PartitionValuesResponse get_partition_values(1:PartitionValuesRequest request)
+     throws(1:MetaException o1, 2:NoSuchObjectException o2);
+ 
+   // get_partition*_ps methods allow filtering by a partial partition specification,
+   // as needed for dynamic partitions. The values that are not restricted should
+   // be empty strings. Nulls were considered (instead of "") but caused errors in
+   // generated Python code. The size of part_vals may be smaller than the
+   // number of partition columns - the unspecified values are considered the same
+   // as "".
+   list<Partition> get_partitions_ps(1:string db_name 2:string tbl_name
+   	3:list<string> part_vals, 4:i16 max_parts=-1)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   list<Partition> get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1,
+      5: string user_name, 6: list<string> group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   list<string> get_partition_names_ps(1:string db_name,
+   	2:string tbl_name, 3:list<string> part_vals, 4:i16 max_parts=-1)
+   	                   throws(1:MetaException o1, 2:NoSuchObjectException o2)
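
For example (illustrative Java, assuming a table partitioned by ds and hr),
fixing ds while leaving hr unrestricted via the empty string:

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.*;

    class PartialSpecSketch {
      static void listHours(ThriftHiveMetastore.Iface client) throws Exception {
        // Partition columns (ds, hr): fix ds, leave hr as "" (unrestricted).
        for (String name : client.get_partition_names_ps("default", "logs",
            Arrays.asList("2018-07-12", ""), (short) -1)) {
          System.out.println(name);
        }
      }
    }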
+ 
+   // get the partitions matching the given partition filter
+   list<Partition> get_partitions_by_filter(1:string db_name 2:string tbl_name
+     3:string filter, 4:i16 max_parts=-1)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   // List partitions as PartitionSpec instances.
+   list<PartitionSpec> get_part_specs_by_filter(1:string db_name 2:string tbl_name
+     3:string filter, 4:i32 max_parts=-1)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   // get the partitions matching the given partition filter
+   // unlike get_partitions_by_filter, takes a serialized hive expression, and with that can work
+   // with any filter (get_partitions_by_filter only works if the filter can be pushed down to JDOQL).
+   PartitionsByExprResult get_partitions_by_expr(1:PartitionsByExprRequest req)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   // get the partitions matching the given partition filter
+   i32 get_num_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   // get partitions given a list of partition names
+   list<Partition> get_partitions_by_names(1:string db_name 2:string tbl_name 3:list<string> names)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   // changes the partition to the new partition object. The partition is identified by the part values
+   // in new_part
+   // * See notes on DDL_TIME
+   void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part)
+                        throws (1:InvalidOperationException o1, 2:MetaException o2)
+ 
+   // change a list of partitions. All partitions are altered atomically and all
+   // prehooks are fired together followed by all post hooks
+   void alter_partitions(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts)
+                        throws (1:InvalidOperationException o1, 2:MetaException o2)
++
+   void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list<Partition> new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2)
+ 
++  AlterPartitionsResponse alter_partitions_with_environment_context_req(1:AlterPartitionsRequest req)
++      throws (1:InvalidOperationException o1, 2:MetaException o2)
++
+   void alter_partition_with_environment_context(1:string db_name,
+       2:string tbl_name, 3:Partition new_part,
+       4:EnvironmentContext environment_context)
+       throws (1:InvalidOperationException o1, 2:MetaException o2)
+ 
+   // rename the old partition to the new partition object by changing old part values to the part values
+   // in new_part. The old partition is identified by part_vals.
+   // partition keys in new_part should be the same as those in old partition.
+   void rename_partition(1:string db_name, 2:string tbl_name, 3:list<string> part_vals, 4:Partition new_part)
+                        throws (1:InvalidOperationException o1, 2:MetaException o2)
+ 
+   // returns whether or not the partition name is valid based on the value of the config
+   // hive.metastore.partition.name.whitelist.pattern
+   bool partition_name_has_valid_characters(1:list<string> part_vals, 2:bool throw_exception)
+  	throws(1: MetaException o1)
+ 
+   // gets the value of the configuration key in the metastore server. returns
+   // defaultValue if the key does not exist. if the configuration key does not
+   // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is
+   // thrown.
+   string get_config_value(1:string name, 2:string defaultValue)
+                           throws(1:ConfigValSecurityException o1)
+ 
+   // converts a partition name into a partition values array
+   list<string> partition_name_to_vals(1: string part_name)
+                           throws(1: MetaException o1)
+   // converts a partition name into a partition specification (a mapping from
+   // the partition cols to the values)
+   map<string, string> partition_name_to_spec(1: string part_name)
+                           throws(1: MetaException o1)
+ 
+   void markPartitionForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
+                   4:PartitionEventType eventType) throws (1: MetaException o1, 2: NoSuchObjectException o2,
+                   3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
+                   6: InvalidPartitionException o6)
+   bool isPartitionMarkedForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
+                   4: PartitionEventType eventType) throws (1: MetaException o1, 2:NoSuchObjectException o2,
+                   3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
+                   6: InvalidPartitionException o6)
+ 
+   //primary keys and foreign keys
+   PrimaryKeysResponse get_primary_keys(1:PrimaryKeysRequest request)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   ForeignKeysResponse get_foreign_keys(1:ForeignKeysRequest request)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   // other constraints
+   UniqueConstraintsResponse get_unique_constraints(1:UniqueConstraintsRequest request)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   NotNullConstraintsResponse get_not_null_constraints(1:NotNullConstraintsRequest request)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   DefaultConstraintsResponse get_default_constraints(1:DefaultConstraintsRequest request)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+   CheckConstraintsResponse get_check_constraints(1:CheckConstraintsRequest request)
+                        throws(1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   // column statistics interfaces
+ 
+   // update APIs persist the column statistics object(s) that are passed in. If statistics already
+   // exist for one or more columns, the existing statistics will be overwritten. The update APIs
+   // validate that the dbName, tableName, partName, colName[] passed in as part of the ColumnStatistics
+   // struct are valid, and throw InvalidInputException/NoSuchObjectException if they are not
+   bool update_table_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
+               2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+   bool update_partition_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
+               2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+ 
+   // get APIs return the column statistics corresponding to db_name, tbl_name, [part_name], col_name if
+   // such statistics exist. If the required statistics don't exist, the get APIs throw NoSuchObjectException
+   // For instance, if get_table_column_statistics is called on a partitioned table for which only
+   // partition level column stats exist, get_table_column_statistics will throw NoSuchObjectException
+   ColumnStatistics get_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
+               (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidInputException o3, 4:InvalidObjectException o4)
+   ColumnStatistics get_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name,
+                4:string col_name) throws (1:NoSuchObjectException o1, 2:MetaException o2,
+                3:InvalidInputException o3, 4:InvalidObjectException o4)
+   TableStatsResult get_table_statistics_req(1:TableStatsRequest request) throws
+               (1:NoSuchObjectException o1, 2:MetaException o2)
+   PartitionsStatsResult get_partitions_statistics_req(1:PartitionsStatsRequest request) throws
+               (1:NoSuchObjectException o1, 2:MetaException o2)
+   AggrStats get_aggr_stats_for(1:PartitionsStatsRequest request) throws
+               (1:NoSuchObjectException o1, 2:MetaException o2)
+   bool set_aggr_stats_for(1:SetPartitionsStatsRequest request) throws
+               (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+ 
+ 
+   // delete APIs attempt to delete the column statistics, if found, associated with a given db_name, tbl_name, [part_name]
+   // and col_name. If a delete API doesn't find the statistics record in the metastore, it throws NoSuchObjectException.
+   // The delete APIs validate the input and throw InvalidInputException/InvalidObjectException if the input is invalid.
+   bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name) throws
+               (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
+                4:InvalidInputException o4)
+   bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
+               (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
+                4:InvalidInputException o4)
+ 
+   //
+   // user-defined functions
+   //
+ 
+   void create_function(1:Function func)
+       throws (1:AlreadyExistsException o1,
+               2:InvalidObjectException o2,
+               3:MetaException o3,
+               4:NoSuchObjectException o4)
+ 
+   void drop_function(1:string dbName, 2:string funcName)
+       throws (1:NoSuchObjectException o1, 2:MetaException o3)
+ 
+   void alter_function(1:string dbName, 2:string funcName, 3:Function newFunc)
+       throws (1:InvalidOperationException o1, 2:MetaException o2)
+ 
+   list<string> get_functions(1:string dbName, 2:string pattern)
+       throws (1:MetaException o1)
+   Function get_function(1:string dbName, 2:string funcName)
+       throws (1:MetaException o1, 2:NoSuchObjectException o2)
+ 
+   GetAllFunctionsResponse get_all_functions() throws (1:MetaException o1)
+ 
+   //authorization privileges
+ 
+   bool create_role(1:Role role) throws(1:MetaException o1)
+   bool drop_role(1:string role_name) throws(1:MetaException o1)
+   list<string> get_role_names() throws(1:MetaException o1)
+   // Deprecated, use grant_revoke_role()
+   bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type,
+     4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1)
+   // Deprecated, use grant_revoke_role()
+   bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type)
+                         throws(1:MetaException o1)
+   list<Role> list_roles(1:string principal_name, 2:PrincipalType principal_type) throws(1:MetaException o1)
+   GrantRevokeRoleResponse grant_revoke_role(1:GrantRevokeRoleRequest request) throws(1:MetaException o1)
+ 
+   // get all role-grants for users/roles that have been granted the given role
+   // Note that in the returned list of RolePrincipalGrants, the roleName is
+   // redundant as it would match the role_name argument of this function
+   GetPrincipalsInRoleResponse get_principals_in_role(1: GetPrincipalsInRoleRequest request) throws(1:MetaException o1)
+ 
+   // get grant information of all roles granted to the given principal
+   // Note that in the returned list of RolePrincipalGrants, the principal name,type is
+   // redundant as it would match the principal name,type arguments of this function
+   GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(1: GetRoleGrantsForPrincipalRequest request) throws(1:MetaException o1)
+ 
+   PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name,
+     3: list<string> group_names) throws(1:MetaException o1)
+   list<HiveObjectPrivilege> list_privileges(1:string principal_name, 2:PrincipalType principal_type,
+     3: HiveObjectRef hiveObject) throws(1:MetaException o1)
+ 
+   // Deprecated, use grant_revoke_privileges()
+   bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
+   // Deprecated, use grant_revoke_privileges()
+   bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
+   GrantRevokePrivilegeResponse grant_revoke_privileges(1:GrantRevokePrivilegeRequest request) throws(1:MetaException o1);
+   // Revokes all privileges for the object and adds the newly granted privileges for it.
+   GrantRevokePrivilegeResponse refresh_privileges(1:HiveObjectRef objToRefresh, 2:string authorizer, 3:GrantRevokePrivilegeRequest grantRequest) throws(1:MetaException o1);
+ 
+   // this is used by metastore client to send UGI information to metastore server immediately
+   // after setting up a connection.
+   list<string> set_ugi(1:string user_name, 2:list<string> group_names) throws (1:MetaException o1)
+ 
+   //Authentication (delegation token) interfaces
+ 
+   // get metastore server delegation token for use from the map/reduce tasks to authenticate
+   // to metastore server
+   string get_delegation_token(1:string token_owner, 2:string renewer_kerberos_principal_name)
+     throws (1:MetaException o1)
+ 
+   // method to renew delegation token obtained from metastore server
+   i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1)
+ 
+   // method to cancel delegation token obtained from metastore server
+   void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1)
+ 
+   // add a delegation token
+   bool add_token(1:string token_identifier, 2:string delegation_token)
+ 
+   // remove a delegation token
+   bool remove_token(1:string token_identifier)
+ 
+   // get a delegation token by identifier
+   string get_token(1:string token_identifier)
+ 
+   // get all delegation token identifiers
+   list<string> get_all_token_identifiers()
+ 
+   // add master key
+   i32 add_master_key(1:string key) throws (1:MetaException o1)
+ 
+   // update master key
+   void update_master_key(1:i32 seq_number, 2:string key) throws (1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   // remove master key
+   bool remove_master_key(1:i32 key_seq)
+ 
+   // get master keys
+   list<string> get_master_keys()
+ 
+   // Transaction and lock management calls
+   // Get just list of open transactions
+   GetOpenTxnsResponse get_open_txns()
+   // Get list of open transactions with state (open, aborted)
+   GetOpenTxnsInfoResponse get_open_txns_info()
+   OpenTxnsResponse open_txns(1:OpenTxnRequest rqst)
+   void abort_txn(1:AbortTxnRequest rqst) throws (1:NoSuchTxnException o1)
+   void abort_txns(1:AbortTxnsRequest rqst) throws (1:NoSuchTxnException o1)
+   void commit_txn(1:CommitTxnRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
+   void repl_tbl_writeid_state(1: ReplTblWriteIdStateRequest rqst)
+   GetValidWriteIdsResponse get_valid_write_ids(1:GetValidWriteIdsRequest rqst)
+       throws (1:NoSuchTxnException o1, 2:MetaException o2)
+   AllocateTableWriteIdsResponse allocate_table_write_ids(1:AllocateTableWriteIdsRequest rqst)
+     throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:MetaException o3)
+   LockResponse lock(1:LockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
+   LockResponse check_lock(1:CheckLockRequest rqst)
+     throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:NoSuchLockException o3)
+   void unlock(1:UnlockRequest rqst) throws (1:NoSuchLockException o1, 2:TxnOpenException o2)
+   ShowLocksResponse show_locks(1:ShowLocksRequest rqst)
+   void heartbeat(1:HeartbeatRequest ids) throws (1:NoSuchLockException o1, 2:NoSuchTxnException o2, 3:TxnAbortedException o3)
+   HeartbeatTxnRangeResponse heartbeat_txn_range(1:HeartbeatTxnRangeRequest txns)
+   void compact(1:CompactionRequest rqst) 
+   CompactionResponse compact2(1:CompactionRequest rqst) 
+   ShowCompactResponse show_compact(1:ShowCompactRequest rqst)
+   void add_dynamic_partitions(1:AddDynamicPartitions rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
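
Tying several of these calls together, an illustrative open/commit/abort
skeleton in Java; the user and hostname are the values surfaced by
'show transactions', and all names here are placeholders:

    import org.apache.hadoop.hive.metastore.api.*;

    class TxnLifecycleSketch {
      static void runInTxn(ThriftHiveMetastore.Iface client) throws Exception {
        OpenTxnRequest open =
            new OpenTxnRequest(1, "hive", "worker-1.example.com");
        long txnId = client.open_txns(open).getTxn_ids().get(0);
        try {
          // ... lock(), allocate_table_write_ids(), write data,
          //     heartbeat() during long operations ...
          client.commit_txn(new CommitTxnRequest(txnId));
        } catch (Exception e) {
          client.abort_txn(new AbortTxnRequest(txnId));
          throw e;
        }
      }
    }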
+ 
+   // Notification logging calls
+   NotificationEventResponse get_next_notification(1:NotificationEventRequest rqst) 
+   CurrentNotificationEventId get_current_notificationEventId()
+   NotificationEventsCountResponse get_notification_events_count(1:NotificationEventsCountRequest rqst)
+   FireEventResponse fire_listener_event(1:FireEventRequest rqst)
+   void flushCache()
+   WriteNotificationLogResponse add_write_notification_log(WriteNotificationLogRequest rqst)
+ 
+   // Repl Change Management api
+   CmRecycleResponse cm_recycle(1:CmRecycleRequest request) throws(1:MetaException o1)
+ 
+   GetFileMetadataByExprResult get_file_metadata_by_expr(1:GetFileMetadataByExprRequest req)
+   GetFileMetadataResult get_file_metadata(1:GetFileMetadataRequest req)
+   PutFileMetadataResult put_file_metadata(1:PutFileMetadataRequest req)
+   ClearFileMetadataResult clear_file_metadata(1:ClearFileMetadataRequest req)
+   CacheFileMetadataResult cache_file_metadata(1:CacheFileMetadataRequest req)
+ 
+   // Metastore DB properties
+   string get_metastore_db_uuid() throws (1:MetaException o1)
+ 
+   // Workload management API's
+   WMCreateResourcePlanResponse create_resource_plan(1:WMCreateResourcePlanRequest request)
+       throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+ 
+   WMGetResourcePlanResponse get_resource_plan(1:WMGetResourcePlanRequest request)
+       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   WMGetActiveResourcePlanResponse get_active_resource_plan(1:WMGetActiveResourcePlanRequest request)
+       throws(1:MetaException o2)
+ 
+   WMGetAllResourcePlanResponse get_all_resource_plans(1:WMGetAllResourcePlanRequest request)
+       throws(1:MetaException o1)
+ 
+   WMAlterResourcePlanResponse alter_resource_plan(1:WMAlterResourcePlanRequest request)
+       throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ 
+   WMValidateResourcePlanResponse validate_resource_plan(1:WMValidateResourcePlanRequest request)
+       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   WMDropResourcePlanResponse drop_resource_plan(1:WMDropResourcePlanRequest request)
+       throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ 
+   WMCreateTriggerResponse create_wm_trigger(1:WMCreateTriggerRequest request)
+       throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
+ 
+   WMAlterTriggerResponse alter_wm_trigger(1:WMAlterTriggerRequest request)
+       throws(1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3)
+ 
+   WMDropTriggerResponse drop_wm_trigger(1:WMDropTriggerRequest request)
+       throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ 
+   WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(1:WMGetTriggersForResourePlanRequest request)
+       throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   WMCreatePoolResponse create_wm_pool(1:WMCreatePoolRequest request)
+       throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
+ 
+   WMAlterPoolResponse alter_wm_pool(1:WMAlterPoolRequest request)
+       throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
+ 
+   WMDropPoolResponse drop_wm_pool(1:WMDropPoolRequest request)
+       throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ 
+   WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(1:WMCreateOrUpdateMappingRequest request)
+       throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
+ 
+   WMDropMappingResponse drop_wm_mapping(1:WMDropMappingRequest request)
+       throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ 
+   WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(1:WMCreateOrDropTriggerToPoolMappingRequest request)
+       throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, 4:MetaException o4)
+ 
+   // Schema calls
+   void create_ischema(1:ISchema schema) throws(1:AlreadyExistsException o1,
+         2:NoSuchObjectException o2, 3:MetaException o3)
+   void alter_ischema(1:AlterISchemaRequest rqst)
+         throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   ISchema get_ischema(1:ISchemaName name) throws (1:NoSuchObjectException o1, 2:MetaException o2)
+   void drop_ischema(1:ISchemaName name)
+         throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ 
+   void add_schema_version(1:SchemaVersion schemaVersion)
+         throws(1:AlreadyExistsException o1, 2:NoSuchObjectException o2, 3:MetaException o3)
+   SchemaVersion get_schema_version(1: SchemaVersionDescriptor schemaVersion)
+         throws (1:NoSuchObjectException o1, 2:MetaException o2)
+   SchemaVersion get_schema_latest_version(1: ISchemaName schemaName)
+         throws (1:NoSuchObjectException o1, 2:MetaException o2)
+   list<SchemaVersion> get_schema_all_versions(1: ISchemaName schemaName)
+         throws (1:NoSuchObjectException o1, 2:MetaException o2)
+   void drop_schema_version(1: SchemaVersionDescriptor schemaVersion)
+         throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   FindSchemasByColsResp get_schemas_by_cols(1: FindSchemasByColsRqst rqst)
+         throws(1:MetaException o1)
+   // There is no blanket update of SchemaVersion since it is (mostly) immutable.  The only
+   // updates are the specific ones to associate a version with a serde and to change its state
+   void map_schema_version_to_serde(1: MapSchemaVersionToSerdeRequest rqst)
+         throws(1:NoSuchObjectException o1, 2:MetaException o2)
+   void set_schema_version_state(1: SetSchemaVersionStateRequest rqst)
+         throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3)
+ 
+   void add_serde(1: SerDeInfo serde) throws(1:AlreadyExistsException o1, 2:MetaException o2)
+   SerDeInfo get_serde(1: GetSerdeRequest rqst) throws(1:NoSuchObjectException o1, 2:MetaException o2)
+ 
+   LockResponse get_lock_materialization_rebuild(1: string dbName, 2: string tableName, 3: i64 txnId)
+   bool heartbeat_lock_materialization_rebuild(1: string dbName, 2: string tableName, 3: i64 txnId)
+   
+   void add_runtime_stats(1: RuntimeStat stat) throws(1:MetaException o1)
+   list<RuntimeStat> get_runtime_stats(1: GetRuntimeStatsRequest rqst) throws(1:MetaException o1)
+ }
+ 
+ // * Note about the DDL_TIME: When creating or altering a table or a partition,
+ // if the DDL_TIME is not set, the current time will be used.
+ 
+ // For storing info about archived partitions in parameters
+ 
+ // Whether the partition is archived
+ const string IS_ARCHIVED = "is_archived",
+ // The original location of the partition, before archiving. After archiving,
+ // this directory will contain the archive. When the partition
+ // is dropped, this directory will be deleted
+ const string ORIGINAL_LOCATION = "original_location",
+ 
+ // Whether or not the table is considered immutable - immutable tables can only be
+ // overwritten or created if unpartitioned, or if partitioned, partitions inside them
+ // can only be overwritt

<TRUNCATED>
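The methods above are plain Thrift RPCs on the ThriftHiveMetastore service, so the generated ThriftHiveMetastore.Client exposes each one under the same name with the declared request/response types. Below is a minimal client-side sketch, assuming an unsecured (SASL-disabled) metastore on localhost:9083, the conventional default port; the ResourcePlanLister class name and endpoint are illustrative, and production callers normally go through HiveMetaStoreClient rather than the raw Thrift client.

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.hadoop.hive.metastore.api.WMGetAllResourcePlanRequest;
import org.apache.hadoop.hive.metastore.api.WMGetAllResourcePlanResponse;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ResourcePlanLister {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint: an unsecured metastore on the default port.
    TTransport transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    try {
      // Declared above as: get_all_resource_plans(request) throws(1:MetaException o1)
      WMGetAllResourcePlanResponse resp =
          client.get_all_resource_plans(new WMGetAllResourcePlanRequest());
      System.out.println(resp);
    } finally {
      transport.close();
    }
  }
}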

[18/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java
new file mode 100644
index 0000000..957a414
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidOperationException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class InvalidOperationException extends TException implements org.apache.thrift.TBase<InvalidOperationException, InvalidOperationException._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidOperationException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidOperationException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new InvalidOperationExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new InvalidOperationExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InvalidOperationException.class, metaDataMap);
+  }
+
+  public InvalidOperationException() {
+  }
+
+  public InvalidOperationException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public InvalidOperationException(InvalidOperationException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public InvalidOperationException deepCopy() {
+    return new InvalidOperationException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof InvalidOperationException)
+      return this.equals((InvalidOperationException)that);
+    return false;
+  }
+
+  public boolean equals(InvalidOperationException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(InvalidOperationException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("InvalidOperationException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class InvalidOperationExceptionStandardSchemeFactory implements SchemeFactory {
+    public InvalidOperationExceptionStandardScheme getScheme() {
+      return new InvalidOperationExceptionStandardScheme();
+    }
+  }
+
+  private static class InvalidOperationExceptionStandardScheme extends StandardScheme<InvalidOperationException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidOperationException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, InvalidOperationException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class InvalidOperationExceptionTupleSchemeFactory implements SchemeFactory {
+    public InvalidOperationExceptionTupleScheme getScheme() {
+      return new InvalidOperationExceptionTupleScheme();
+    }
+  }
+
+  private static class InvalidOperationExceptionTupleScheme extends TupleScheme<InvalidOperationException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, InvalidOperationException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, InvalidOperationException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
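A note on the generated class above: its write() and read() dispatch through the scheme registry filled in the static initializer, so record-oriented protocols such as TBinaryProtocol take the StandardScheme path while TTupleProtocol takes the TupleScheme path. A minimal round-trip sketch over an in-memory transport (the RoundTrip class name, message text, and buffer size are illustrative):

import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class RoundTrip {
  public static void main(String[] args) throws Exception {
    InvalidOperationException ex =
        new InvalidOperationException("table is not partitioned");

    // TBinaryProtocol reports StandardScheme, so write() uses the
    // InvalidOperationExceptionStandardScheme defined above.
    TMemoryBuffer buf = new TMemoryBuffer(128);
    ex.write(new TBinaryProtocol(buf));

    InvalidOperationException copy = new InvalidOperationException();
    copy.read(new TBinaryProtocol(buf));
    System.out.println(copy); // InvalidOperationException(message:table is not partitioned)
  }
}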

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java
new file mode 100644
index 0000000..b0e0343
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InvalidPartitionException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class InvalidPartitionException extends TException implements org.apache.thrift.TBase<InvalidPartitionException, InvalidPartitionException._Fields>, java.io.Serializable, Cloneable, Comparable<InvalidPartitionException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("InvalidPartitionException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new InvalidPartitionExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new InvalidPartitionExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(InvalidPartitionException.class, metaDataMap);
+  }
+
+  public InvalidPartitionException() {
+  }
+
+  public InvalidPartitionException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public InvalidPartitionException(InvalidPartitionException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public InvalidPartitionException deepCopy() {
+    return new InvalidPartitionException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof InvalidPartitionException)
+      return this.equals((InvalidPartitionException)that);
+    return false;
+  }
+
+  public boolean equals(InvalidPartitionException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(InvalidPartitionException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("InvalidPartitionException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class InvalidPartitionExceptionStandardSchemeFactory implements SchemeFactory {
+    public InvalidPartitionExceptionStandardScheme getScheme() {
+      return new InvalidPartitionExceptionStandardScheme();
+    }
+  }
+
+  private static class InvalidPartitionExceptionStandardScheme extends StandardScheme<InvalidPartitionException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, InvalidPartitionException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, InvalidPartitionException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class InvalidPartitionExceptionTupleSchemeFactory implements SchemeFactory {
+    public InvalidPartitionExceptionTupleScheme getScheme() {
+      return new InvalidPartitionExceptionTupleScheme();
+    }
+  }
+
+  private static class InvalidPartitionExceptionTupleScheme extends TupleScheme<InvalidPartitionException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, InvalidPartitionException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, InvalidPartitionException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
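InvalidPartitionException above repeats the same single-field pattern; its TupleScheme shows what the tuple encoding buys: a one-bit BitSet header records whether message is set, and only set fields are written, with no per-field type tags. A sketch of the same round trip through that path, assuming libthrift 0.9.3, where TTupleProtocol overrides getScheme() so the tuple schemes registered above are selected (class name and message text are illustrative):

import org.apache.hadoop.hive.metastore.api.InvalidPartitionException;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class TupleRoundTrip {
  public static void main(String[] args) throws Exception {
    TMemoryBuffer buf = new TMemoryBuffer(64);

    InvalidPartitionException ex =
        new InvalidPartitionException("unexpected partition spec");
    // Writes the optionals BitSet, then the message string (since it is set).
    ex.write(new TTupleProtocol(buf));

    InvalidPartitionException copy = new InvalidPartitionException();
    copy.read(new TTupleProtocol(buf));
    System.out.println(copy.isSetMessage() + " / " + copy.getMessage());
  }
}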

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java
new file mode 100644
index 0000000..77de5c9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockComponent.java
@@ -0,0 +1,1158 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class LockComponent implements org.apache.thrift.TBase<LockComponent, LockComponent._Fields>, java.io.Serializable, Cloneable, Comparable<LockComponent> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("LockComponent");
+
+  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField LEVEL_FIELD_DESC = new org.apache.thrift.protocol.TField("level", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField PARTITIONNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionname", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField OPERATION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationType", org.apache.thrift.protocol.TType.I32, (short)6);
+  private static final org.apache.thrift.protocol.TField IS_TRANSACTIONAL_FIELD_DESC = new org.apache.thrift.protocol.TField("isTransactional", org.apache.thrift.protocol.TType.BOOL, (short)7);
+  private static final org.apache.thrift.protocol.TField IS_DYNAMIC_PARTITION_WRITE_FIELD_DESC = new org.apache.thrift.protocol.TField("isDynamicPartitionWrite", org.apache.thrift.protocol.TType.BOOL, (short)8);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new LockComponentStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new LockComponentTupleSchemeFactory());
+  }
+
+  private LockType type; // required
+  private LockLevel level; // required
+  private String dbname; // required
+  private String tablename; // optional
+  private String partitionname; // optional
+  private DataOperationType operationType; // optional
+  private boolean isTransactional; // optional
+  private boolean isDynamicPartitionWrite; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    /**
+     * 
+     * @see LockType
+     */
+    TYPE((short)1, "type"),
+    /**
+     * 
+     * @see LockLevel
+     */
+    LEVEL((short)2, "level"),
+    DBNAME((short)3, "dbname"),
+    TABLENAME((short)4, "tablename"),
+    PARTITIONNAME((short)5, "partitionname"),
+    /**
+     * 
+     * @see DataOperationType
+     */
+    OPERATION_TYPE((short)6, "operationType"),
+    IS_TRANSACTIONAL((short)7, "isTransactional"),
+    IS_DYNAMIC_PARTITION_WRITE((short)8, "isDynamicPartitionWrite");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TYPE
+          return TYPE;
+        case 2: // LEVEL
+          return LEVEL;
+        case 3: // DBNAME
+          return DBNAME;
+        case 4: // TABLENAME
+          return TABLENAME;
+        case 5: // PARTITIONNAME
+          return PARTITIONNAME;
+        case 6: // OPERATION_TYPE
+          return OPERATION_TYPE;
+        case 7: // IS_TRANSACTIONAL
+          return IS_TRANSACTIONAL;
+        case 8: // IS_DYNAMIC_PARTITION_WRITE
+          return IS_DYNAMIC_PARTITION_WRITE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ISTRANSACTIONAL_ISSET_ID = 0;
+  private static final int __ISDYNAMICPARTITIONWRITE_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.TABLENAME,_Fields.PARTITIONNAME,_Fields.OPERATION_TYPE,_Fields.IS_TRANSACTIONAL,_Fields.IS_DYNAMIC_PARTITION_WRITE};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, LockType.class)));
+    tmpMap.put(_Fields.LEVEL, new org.apache.thrift.meta_data.FieldMetaData("level", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, LockLevel.class)));
+    tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLENAME, new org.apache.thrift.meta_data.FieldMetaData("tablename", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARTITIONNAME, new org.apache.thrift.meta_data.FieldMetaData("partitionname", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.OPERATION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("operationType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, DataOperationType.class)));
+    tmpMap.put(_Fields.IS_TRANSACTIONAL, new org.apache.thrift.meta_data.FieldMetaData("isTransactional", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.IS_DYNAMIC_PARTITION_WRITE, new org.apache.thrift.meta_data.FieldMetaData("isDynamicPartitionWrite", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(LockComponent.class, metaDataMap);
+  }
+
+  public LockComponent() {
+    this.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.UNSET;
+
+    this.isTransactional = false;
+
+    this.isDynamicPartitionWrite = false;
+
+  }
+
+  public LockComponent(
+    LockType type,
+    LockLevel level,
+    String dbname)
+  {
+    this();
+    this.type = type;
+    this.level = level;
+    this.dbname = dbname;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public LockComponent(LockComponent other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetType()) {
+      this.type = other.type;
+    }
+    if (other.isSetLevel()) {
+      this.level = other.level;
+    }
+    if (other.isSetDbname()) {
+      this.dbname = other.dbname;
+    }
+    if (other.isSetTablename()) {
+      this.tablename = other.tablename;
+    }
+    if (other.isSetPartitionname()) {
+      this.partitionname = other.partitionname;
+    }
+    if (other.isSetOperationType()) {
+      this.operationType = other.operationType;
+    }
+    this.isTransactional = other.isTransactional;
+    this.isDynamicPartitionWrite = other.isDynamicPartitionWrite;
+  }
+
+  public LockComponent deepCopy() {
+    return new LockComponent(this);
+  }
+
+  @Override
+  public void clear() {
+    this.type = null;
+    this.level = null;
+    this.dbname = null;
+    this.tablename = null;
+    this.partitionname = null;
+    this.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.UNSET;
+
+    this.isTransactional = false;
+
+    this.isDynamicPartitionWrite = false;
+
+  }
+
+  /**
+   * 
+   * @see LockType
+   */
+  public LockType getType() {
+    return this.type;
+  }
+
+  /**
+   * 
+   * @see LockType
+   */
+  public void setType(LockType type) {
+    this.type = type;
+  }
+
+  public void unsetType() {
+    this.type = null;
+  }
+
+  /** Returns true if field type is set (has been assigned a value) and false otherwise */
+  public boolean isSetType() {
+    return this.type != null;
+  }
+
+  public void setTypeIsSet(boolean value) {
+    if (!value) {
+      this.type = null;
+    }
+  }
+
+  /**
+   * 
+   * @see LockLevel
+   */
+  public LockLevel getLevel() {
+    return this.level;
+  }
+
+  /**
+   * 
+   * @see LockLevel
+   */
+  public void setLevel(LockLevel level) {
+    this.level = level;
+  }
+
+  public void unsetLevel() {
+    this.level = null;
+  }
+
+  /** Returns true if field level is set (has been assigned a value) and false otherwise */
+  public boolean isSetLevel() {
+    return this.level != null;
+  }
+
+  public void setLevelIsSet(boolean value) {
+    if (!value) {
+      this.level = null;
+    }
+  }
+
+  public String getDbname() {
+    return this.dbname;
+  }
+
+  public void setDbname(String dbname) {
+    this.dbname = dbname;
+  }
+
+  public void unsetDbname() {
+    this.dbname = null;
+  }
+
+  /** Returns true if field dbname is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbname() {
+    return this.dbname != null;
+  }
+
+  public void setDbnameIsSet(boolean value) {
+    if (!value) {
+      this.dbname = null;
+    }
+  }
+
+  public String getTablename() {
+    return this.tablename;
+  }
+
+  public void setTablename(String tablename) {
+    this.tablename = tablename;
+  }
+
+  public void unsetTablename() {
+    this.tablename = null;
+  }
+
+  /** Returns true if field tablename is set (has been assigned a value) and false otherwise */
+  public boolean isSetTablename() {
+    return this.tablename != null;
+  }
+
+  public void setTablenameIsSet(boolean value) {
+    if (!value) {
+      this.tablename = null;
+    }
+  }
+
+  public String getPartitionname() {
+    return this.partitionname;
+  }
+
+  public void setPartitionname(String partitionname) {
+    this.partitionname = partitionname;
+  }
+
+  public void unsetPartitionname() {
+    this.partitionname = null;
+  }
+
+  /** Returns true if field partitionname is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitionname() {
+    return this.partitionname != null;
+  }
+
+  public void setPartitionnameIsSet(boolean value) {
+    if (!value) {
+      this.partitionname = null;
+    }
+  }
+
+  /**
+   * 
+   * @see DataOperationType
+   */
+  public DataOperationType getOperationType() {
+    return this.operationType;
+  }
+
+  /**
+   * 
+   * @see DataOperationType
+   */
+  public void setOperationType(DataOperationType operationType) {
+    this.operationType = operationType;
+  }
+
+  public void unsetOperationType() {
+    this.operationType = null;
+  }
+
+  /** Returns true if field operationType is set (has been assigned a value) and false otherwise */
+  public boolean isSetOperationType() {
+    return this.operationType != null;
+  }
+
+  public void setOperationTypeIsSet(boolean value) {
+    if (!value) {
+      this.operationType = null;
+    }
+  }
+
+  public boolean isIsTransactional() {
+    return this.isTransactional;
+  }
+
+  public void setIsTransactional(boolean isTransactional) {
+    this.isTransactional = isTransactional;
+    setIsTransactionalIsSet(true);
+  }
+
+  public void unsetIsTransactional() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISTRANSACTIONAL_ISSET_ID);
+  }
+
+  /** Returns true if field isTransactional is set (has been assigned a value) and false otherwise */
+  public boolean isSetIsTransactional() {
+    return EncodingUtils.testBit(__isset_bitfield, __ISTRANSACTIONAL_ISSET_ID);
+  }
+
+  public void setIsTransactionalIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISTRANSACTIONAL_ISSET_ID, value);
+  }
+
+  public boolean isIsDynamicPartitionWrite() {
+    return this.isDynamicPartitionWrite;
+  }
+
+  public void setIsDynamicPartitionWrite(boolean isDynamicPartitionWrite) {
+    this.isDynamicPartitionWrite = isDynamicPartitionWrite;
+    setIsDynamicPartitionWriteIsSet(true);
+  }
+
+  public void unsetIsDynamicPartitionWrite() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISDYNAMICPARTITIONWRITE_ISSET_ID);
+  }
+
+  /** Returns true if field isDynamicPartitionWrite is set (has been assigned a value) and false otherwise */
+  public boolean isSetIsDynamicPartitionWrite() {
+    return EncodingUtils.testBit(__isset_bitfield, __ISDYNAMICPARTITIONWRITE_ISSET_ID);
+  }
+
+  public void setIsDynamicPartitionWriteIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISDYNAMICPARTITIONWRITE_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TYPE:
+      if (value == null) {
+        unsetType();
+      } else {
+        setType((LockType)value);
+      }
+      break;
+
+    case LEVEL:
+      if (value == null) {
+        unsetLevel();
+      } else {
+        setLevel((LockLevel)value);
+      }
+      break;
+
+    case DBNAME:
+      if (value == null) {
+        unsetDbname();
+      } else {
+        setDbname((String)value);
+      }
+      break;
+
+    case TABLENAME:
+      if (value == null) {
+        unsetTablename();
+      } else {
+        setTablename((String)value);
+      }
+      break;
+
+    case PARTITIONNAME:
+      if (value == null) {
+        unsetPartitionname();
+      } else {
+        setPartitionname((String)value);
+      }
+      break;
+
+    case OPERATION_TYPE:
+      if (value == null) {
+        unsetOperationType();
+      } else {
+        setOperationType((DataOperationType)value);
+      }
+      break;
+
+    case IS_TRANSACTIONAL:
+      if (value == null) {
+        unsetIsTransactional();
+      } else {
+        setIsTransactional((Boolean)value);
+      }
+      break;
+
+    case IS_DYNAMIC_PARTITION_WRITE:
+      if (value == null) {
+        unsetIsDynamicPartitionWrite();
+      } else {
+        setIsDynamicPartitionWrite((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TYPE:
+      return getType();
+
+    case LEVEL:
+      return getLevel();
+
+    case DBNAME:
+      return getDbname();
+
+    case TABLENAME:
+      return getTablename();
+
+    case PARTITIONNAME:
+      return getPartitionname();
+
+    case OPERATION_TYPE:
+      return getOperationType();
+
+    case IS_TRANSACTIONAL:
+      return isIsTransactional();
+
+    case IS_DYNAMIC_PARTITION_WRITE:
+      return isIsDynamicPartitionWrite();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TYPE:
+      return isSetType();
+    case LEVEL:
+      return isSetLevel();
+    case DBNAME:
+      return isSetDbname();
+    case TABLENAME:
+      return isSetTablename();
+    case PARTITIONNAME:
+      return isSetPartitionname();
+    case OPERATION_TYPE:
+      return isSetOperationType();
+    case IS_TRANSACTIONAL:
+      return isSetIsTransactional();
+    case IS_DYNAMIC_PARTITION_WRITE:
+      return isSetIsDynamicPartitionWrite();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof LockComponent)
+      return this.equals((LockComponent)that);
+    return false;
+  }
+
+  public boolean equals(LockComponent that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_type = true && this.isSetType();
+    boolean that_present_type = true && that.isSetType();
+    if (this_present_type || that_present_type) {
+      if (!(this_present_type && that_present_type))
+        return false;
+      if (!this.type.equals(that.type))
+        return false;
+    }
+
+    boolean this_present_level = true && this.isSetLevel();
+    boolean that_present_level = true && that.isSetLevel();
+    if (this_present_level || that_present_level) {
+      if (!(this_present_level && that_present_level))
+        return false;
+      if (!this.level.equals(that.level))
+        return false;
+    }
+
+    boolean this_present_dbname = true && this.isSetDbname();
+    boolean that_present_dbname = true && that.isSetDbname();
+    if (this_present_dbname || that_present_dbname) {
+      if (!(this_present_dbname && that_present_dbname))
+        return false;
+      if (!this.dbname.equals(that.dbname))
+        return false;
+    }
+
+    boolean this_present_tablename = true && this.isSetTablename();
+    boolean that_present_tablename = true && that.isSetTablename();
+    if (this_present_tablename || that_present_tablename) {
+      if (!(this_present_tablename && that_present_tablename))
+        return false;
+      if (!this.tablename.equals(that.tablename))
+        return false;
+    }
+
+    boolean this_present_partitionname = true && this.isSetPartitionname();
+    boolean that_present_partitionname = true && that.isSetPartitionname();
+    if (this_present_partitionname || that_present_partitionname) {
+      if (!(this_present_partitionname && that_present_partitionname))
+        return false;
+      if (!this.partitionname.equals(that.partitionname))
+        return false;
+    }
+
+    boolean this_present_operationType = true && this.isSetOperationType();
+    boolean that_present_operationType = true && that.isSetOperationType();
+    if (this_present_operationType || that_present_operationType) {
+      if (!(this_present_operationType && that_present_operationType))
+        return false;
+      if (!this.operationType.equals(that.operationType))
+        return false;
+    }
+
+    boolean this_present_isTransactional = true && this.isSetIsTransactional();
+    boolean that_present_isTransactional = true && that.isSetIsTransactional();
+    if (this_present_isTransactional || that_present_isTransactional) {
+      if (!(this_present_isTransactional && that_present_isTransactional))
+        return false;
+      if (this.isTransactional != that.isTransactional)
+        return false;
+    }
+
+    boolean this_present_isDynamicPartitionWrite = true && this.isSetIsDynamicPartitionWrite();
+    boolean that_present_isDynamicPartitionWrite = true && that.isSetIsDynamicPartitionWrite();
+    if (this_present_isDynamicPartitionWrite || that_present_isDynamicPartitionWrite) {
+      if (!(this_present_isDynamicPartitionWrite && that_present_isDynamicPartitionWrite))
+        return false;
+      if (this.isDynamicPartitionWrite != that.isDynamicPartitionWrite)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_type = true && (isSetType());
+    list.add(present_type);
+    if (present_type)
+      list.add(type.getValue());
+
+    boolean present_level = true && (isSetLevel());
+    list.add(present_level);
+    if (present_level)
+      list.add(level.getValue());
+
+    boolean present_dbname = true && (isSetDbname());
+    list.add(present_dbname);
+    if (present_dbname)
+      list.add(dbname);
+
+    boolean present_tablename = true && (isSetTablename());
+    list.add(present_tablename);
+    if (present_tablename)
+      list.add(tablename);
+
+    boolean present_partitionname = true && (isSetPartitionname());
+    list.add(present_partitionname);
+    if (present_partitionname)
+      list.add(partitionname);
+
+    boolean present_operationType = true && (isSetOperationType());
+    list.add(present_operationType);
+    if (present_operationType)
+      list.add(operationType.getValue());
+
+    boolean present_isTransactional = true && (isSetIsTransactional());
+    list.add(present_isTransactional);
+    if (present_isTransactional)
+      list.add(isTransactional);
+
+    boolean present_isDynamicPartitionWrite = true && (isSetIsDynamicPartitionWrite());
+    list.add(present_isDynamicPartitionWrite);
+    if (present_isDynamicPartitionWrite)
+      list.add(isDynamicPartitionWrite);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(LockComponent other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetLevel()).compareTo(other.isSetLevel());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLevel()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.level, other.level);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, other.dbname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTablename()).compareTo(other.isSetTablename());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTablename()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablename, other.tablename);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartitionname()).compareTo(other.isSetPartitionname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitionname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionname, other.partitionname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOperationType()).compareTo(other.isSetOperationType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOperationType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationType, other.operationType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetIsTransactional()).compareTo(other.isSetIsTransactional());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIsTransactional()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isTransactional, other.isTransactional);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetIsDynamicPartitionWrite()).compareTo(other.isSetIsDynamicPartitionWrite());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIsDynamicPartitionWrite()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isDynamicPartitionWrite, other.isDynamicPartitionWrite);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("LockComponent(");
+    boolean first = true;
+
+    sb.append("type:");
+    if (this.type == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.type);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("level:");
+    if (this.level == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.level);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbname:");
+    if (this.dbname == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbname);
+    }
+    first = false;
+    if (isSetTablename()) {
+      if (!first) sb.append(", ");
+      sb.append("tablename:");
+      if (this.tablename == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.tablename);
+      }
+      first = false;
+    }
+    if (isSetPartitionname()) {
+      if (!first) sb.append(", ");
+      sb.append("partitionname:");
+      if (this.partitionname == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partitionname);
+      }
+      first = false;
+    }
+    if (isSetOperationType()) {
+      if (!first) sb.append(", ");
+      sb.append("operationType:");
+      if (this.operationType == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.operationType);
+      }
+      first = false;
+    }
+    if (isSetIsTransactional()) {
+      if (!first) sb.append(", ");
+      sb.append("isTransactional:");
+      sb.append(this.isTransactional);
+      first = false;
+    }
+    if (isSetIsDynamicPartitionWrite()) {
+      if (!first) sb.append(", ");
+      sb.append("isDynamicPartitionWrite:");
+      sb.append(this.isDynamicPartitionWrite);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetType()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'type' is unset! Struct:" + toString());
+    }
+
+    if (!isSetLevel()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'level' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDbname()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbname' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class LockComponentStandardSchemeFactory implements SchemeFactory {
+    public LockComponentStandardScheme getScheme() {
+      return new LockComponentStandardScheme();
+    }
+  }
+
+  private static class LockComponentStandardScheme extends StandardScheme<LockComponent> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, LockComponent struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.type = org.apache.hadoop.hive.metastore.api.LockType.findByValue(iprot.readI32());
+              struct.setTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // LEVEL
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.level = org.apache.hadoop.hive.metastore.api.LockLevel.findByValue(iprot.readI32());
+              struct.setLevelIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // DBNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbname = iprot.readString();
+              struct.setDbnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // TABLENAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tablename = iprot.readString();
+              struct.setTablenameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // PARTITIONNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.partitionname = iprot.readString();
+              struct.setPartitionnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // OPERATION_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.findByValue(iprot.readI32());
+              struct.setOperationTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // IS_TRANSACTIONAL
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.isTransactional = iprot.readBool();
+              struct.setIsTransactionalIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // IS_DYNAMIC_PARTITION_WRITE
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.isDynamicPartitionWrite = iprot.readBool();
+              struct.setIsDynamicPartitionWriteIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, LockComponent struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.type != null) {
+        oprot.writeFieldBegin(TYPE_FIELD_DESC);
+        oprot.writeI32(struct.type.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.level != null) {
+        oprot.writeFieldBegin(LEVEL_FIELD_DESC);
+        oprot.writeI32(struct.level.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbname != null) {
+        oprot.writeFieldBegin(DBNAME_FIELD_DESC);
+        oprot.writeString(struct.dbname);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tablename != null) {
+        if (struct.isSetTablename()) {
+          oprot.writeFieldBegin(TABLENAME_FIELD_DESC);
+          oprot.writeString(struct.tablename);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.partitionname != null) {
+        if (struct.isSetPartitionname()) {
+          oprot.writeFieldBegin(PARTITIONNAME_FIELD_DESC);
+          oprot.writeString(struct.partitionname);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.operationType != null) {
+        if (struct.isSetOperationType()) {
+          oprot.writeFieldBegin(OPERATION_TYPE_FIELD_DESC);
+          oprot.writeI32(struct.operationType.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetIsTransactional()) {
+        oprot.writeFieldBegin(IS_TRANSACTIONAL_FIELD_DESC);
+        oprot.writeBool(struct.isTransactional);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetIsDynamicPartitionWrite()) {
+        oprot.writeFieldBegin(IS_DYNAMIC_PARTITION_WRITE_FIELD_DESC);
+        oprot.writeBool(struct.isDynamicPartitionWrite);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class LockComponentTupleSchemeFactory implements SchemeFactory {
+    public LockComponentTupleScheme getScheme() {
+      return new LockComponentTupleScheme();
+    }
+  }
+
+  private static class LockComponentTupleScheme extends TupleScheme<LockComponent> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, LockComponent struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI32(struct.type.getValue());
+      oprot.writeI32(struct.level.getValue());
+      oprot.writeString(struct.dbname);
+      BitSet optionals = new BitSet();
+      if (struct.isSetTablename()) {
+        optionals.set(0);
+      }
+      if (struct.isSetPartitionname()) {
+        optionals.set(1);
+      }
+      if (struct.isSetOperationType()) {
+        optionals.set(2);
+      }
+      if (struct.isSetIsTransactional()) {
+        optionals.set(3);
+      }
+      if (struct.isSetIsDynamicPartitionWrite()) {
+        optionals.set(4);
+      }
+      oprot.writeBitSet(optionals, 5);
+      if (struct.isSetTablename()) {
+        oprot.writeString(struct.tablename);
+      }
+      if (struct.isSetPartitionname()) {
+        oprot.writeString(struct.partitionname);
+      }
+      if (struct.isSetOperationType()) {
+        oprot.writeI32(struct.operationType.getValue());
+      }
+      if (struct.isSetIsTransactional()) {
+        oprot.writeBool(struct.isTransactional);
+      }
+      if (struct.isSetIsDynamicPartitionWrite()) {
+        oprot.writeBool(struct.isDynamicPartitionWrite);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, LockComponent struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.type = org.apache.hadoop.hive.metastore.api.LockType.findByValue(iprot.readI32());
+      struct.setTypeIsSet(true);
+      struct.level = org.apache.hadoop.hive.metastore.api.LockLevel.findByValue(iprot.readI32());
+      struct.setLevelIsSet(true);
+      struct.dbname = iprot.readString();
+      struct.setDbnameIsSet(true);
+      BitSet incoming = iprot.readBitSet(5);
+      if (incoming.get(0)) {
+        struct.tablename = iprot.readString();
+        struct.setTablenameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.partitionname = iprot.readString();
+        struct.setPartitionnameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.findByValue(iprot.readI32());
+        struct.setOperationTypeIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.isTransactional = iprot.readBool();
+        struct.setIsTransactionalIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.isDynamicPartitionWrite = iprot.readBool();
+        struct.setIsDynamicPartitionWriteIsSet(true);
+      }
+    }
+  }
+
+}
+
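
The tuple scheme above is the compact variant: the required fields (type, level, dbname) are written unconditionally, and the five optional fields travel behind a 5-bit BitSet. A minimal round-trip sketch, assuming libthrift 0.9.3 on the classpath plus the usual Thrift-generated required-field constructor and the LockType/DataOperationType constants from this module (illustrative only, not part of this commit):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hive.metastore.api.DataOperationType;
import org.apache.hadoop.hive.metastore.api.LockComponent;
import org.apache.hadoop.hive.metastore.api.LockLevel;
import org.apache.hadoop.hive.metastore.api.LockType;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.transport.TIOStreamTransport;

public class LockComponentRoundTrip {
  public static void main(String[] args) throws Exception {
    // Required fields; constructor signature assumed from the validate() checks above.
    LockComponent out = new LockComponent(LockType.SHARED_READ, LockLevel.TABLE, "default");
    out.setTablename("t1");                         // optional -> bit 0 of the BitSet
    out.setOperationType(DataOperationType.SELECT); // optional -> bit 2

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    // TTupleProtocol.getScheme() selects LockComponentTupleScheme via the schemes map.
    out.write(new TTupleProtocol(new TIOStreamTransport(buf)));

    LockComponent in = new LockComponent();
    in.read(new TTupleProtocol(new TIOStreamTransport(
        new ByteArrayInputStream(buf.toByteArray()))));
    System.out.println(in); // tablename and operationType set; the rest left unset
  }
}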

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockLevel.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockLevel.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockLevel.java
new file mode 100644
index 0000000..e58ea46
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockLevel.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum LockLevel implements org.apache.thrift.TEnum {
+  DB(1),
+  TABLE(2),
+  PARTITION(3);
+
+  private final int value;
+
+  private LockLevel(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static LockLevel findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return DB;
+      case 2:
+        return TABLE;
+      case 3:
+        return PARTITION;
+      default:
+        return null;
+    }
+  }
+}
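
Note that findByValue returns null for an unrecognized integer rather than throwing, which is how an old reader tolerates enum values added by a newer writer; callers must handle the null. A two-line illustration (not part of this commit):

import org.apache.hadoop.hive.metastore.api.LockLevel;

public class LockLevelLookup {
  public static void main(String[] args) {
    System.out.println(LockLevel.findByValue(2));  // TABLE
    System.out.println(LockLevel.findByValue(99)); // null -- unknown wire value
  }
}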


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
new file mode 100644
index 0000000..e0e1cd4
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEvent.java
@@ -0,0 +1,1112 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NotificationEvent implements org.apache.thrift.TBase<NotificationEvent, NotificationEvent._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEvent> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEvent");
+
+  private static final org.apache.thrift.protocol.TField EVENT_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("eventId", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField EVENT_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("eventTime", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField EVENT_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("eventType", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField MESSAGE_FORMAT_FIELD_DESC = new org.apache.thrift.protocol.TField("messageFormat", org.apache.thrift.protocol.TType.STRING, (short)7);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)8);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NotificationEventStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NotificationEventTupleSchemeFactory());
+  }
+
+  private long eventId; // required
+  private int eventTime; // required
+  private String eventType; // required
+  private String dbName; // optional
+  private String tableName; // optional
+  private String message; // required
+  private String messageFormat; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    EVENT_ID((short)1, "eventId"),
+    EVENT_TIME((short)2, "eventTime"),
+    EVENT_TYPE((short)3, "eventType"),
+    DB_NAME((short)4, "dbName"),
+    TABLE_NAME((short)5, "tableName"),
+    MESSAGE((short)6, "message"),
+    MESSAGE_FORMAT((short)7, "messageFormat"),
+    CAT_NAME((short)8, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // EVENT_ID
+          return EVENT_ID;
+        case 2: // EVENT_TIME
+          return EVENT_TIME;
+        case 3: // EVENT_TYPE
+          return EVENT_TYPE;
+        case 4: // DB_NAME
+          return DB_NAME;
+        case 5: // TABLE_NAME
+          return TABLE_NAME;
+        case 6: // MESSAGE
+          return MESSAGE;
+        case 7: // MESSAGE_FORMAT
+          return MESSAGE_FORMAT;
+        case 8: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __EVENTID_ISSET_ID = 0;
+  private static final int __EVENTTIME_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.DB_NAME,_Fields.TABLE_NAME,_Fields.MESSAGE_FORMAT,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.EVENT_ID, new org.apache.thrift.meta_data.FieldMetaData("eventId", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.EVENT_TIME, new org.apache.thrift.meta_data.FieldMetaData("eventTime", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.EVENT_TYPE, new org.apache.thrift.meta_data.FieldMetaData("eventType", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.MESSAGE_FORMAT, new org.apache.thrift.meta_data.FieldMetaData("messageFormat", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotificationEvent.class, metaDataMap);
+  }
+
+  public NotificationEvent() {
+  }
+
+  public NotificationEvent(
+    long eventId,
+    int eventTime,
+    String eventType,
+    String message)
+  {
+    this();
+    this.eventId = eventId;
+    setEventIdIsSet(true);
+    this.eventTime = eventTime;
+    setEventTimeIsSet(true);
+    this.eventType = eventType;
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NotificationEvent(NotificationEvent other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.eventId = other.eventId;
+    this.eventTime = other.eventTime;
+    if (other.isSetEventType()) {
+      this.eventType = other.eventType;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTableName()) {
+      this.tableName = other.tableName;
+    }
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+    if (other.isSetMessageFormat()) {
+      this.messageFormat = other.messageFormat;
+    }
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public NotificationEvent deepCopy() {
+    return new NotificationEvent(this);
+  }
+
+  @Override
+  public void clear() {
+    setEventIdIsSet(false);
+    this.eventId = 0;
+    setEventTimeIsSet(false);
+    this.eventTime = 0;
+    this.eventType = null;
+    this.dbName = null;
+    this.tableName = null;
+    this.message = null;
+    this.messageFormat = null;
+    this.catName = null;
+  }
+
+  public long getEventId() {
+    return this.eventId;
+  }
+
+  public void setEventId(long eventId) {
+    this.eventId = eventId;
+    setEventIdIsSet(true);
+  }
+
+  public void unsetEventId() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __EVENTID_ISSET_ID);
+  }
+
+  /** Returns true if field eventId is set (has been assigned a value) and false otherwise */
+  public boolean isSetEventId() {
+    return EncodingUtils.testBit(__isset_bitfield, __EVENTID_ISSET_ID);
+  }
+
+  public void setEventIdIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __EVENTID_ISSET_ID, value);
+  }
+
+  public int getEventTime() {
+    return this.eventTime;
+  }
+
+  public void setEventTime(int eventTime) {
+    this.eventTime = eventTime;
+    setEventTimeIsSet(true);
+  }
+
+  public void unsetEventTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __EVENTTIME_ISSET_ID);
+  }
+
+  /** Returns true if field eventTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetEventTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __EVENTTIME_ISSET_ID);
+  }
+
+  public void setEventTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __EVENTTIME_ISSET_ID, value);
+  }
+
+  public String getEventType() {
+    return this.eventType;
+  }
+
+  public void setEventType(String eventType) {
+    this.eventType = eventType;
+  }
+
+  public void unsetEventType() {
+    this.eventType = null;
+  }
+
+  /** Returns true if field eventType is set (has been assigned a value) and false otherwise */
+  public boolean isSetEventType() {
+    return this.eventType != null;
+  }
+
+  public void setEventTypeIsSet(boolean value) {
+    if (!value) {
+      this.eventType = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public String getMessageFormat() {
+    return this.messageFormat;
+  }
+
+  public void setMessageFormat(String messageFormat) {
+    this.messageFormat = messageFormat;
+  }
+
+  public void unsetMessageFormat() {
+    this.messageFormat = null;
+  }
+
+  /** Returns true if field messageFormat is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessageFormat() {
+    return this.messageFormat != null;
+  }
+
+  public void setMessageFormatIsSet(boolean value) {
+    if (!value) {
+      this.messageFormat = null;
+    }
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case EVENT_ID:
+      if (value == null) {
+        unsetEventId();
+      } else {
+        setEventId((Long)value);
+      }
+      break;
+
+    case EVENT_TIME:
+      if (value == null) {
+        unsetEventTime();
+      } else {
+        setEventTime((Integer)value);
+      }
+      break;
+
+    case EVENT_TYPE:
+      if (value == null) {
+        unsetEventType();
+      } else {
+        setEventType((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    case MESSAGE_FORMAT:
+      if (value == null) {
+        unsetMessageFormat();
+      } else {
+        setMessageFormat((String)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case EVENT_ID:
+      return getEventId();
+
+    case EVENT_TIME:
+      return getEventTime();
+
+    case EVENT_TYPE:
+      return getEventType();
+
+    case DB_NAME:
+      return getDbName();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case MESSAGE:
+      return getMessage();
+
+    case MESSAGE_FORMAT:
+      return getMessageFormat();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case EVENT_ID:
+      return isSetEventId();
+    case EVENT_TIME:
+      return isSetEventTime();
+    case EVENT_TYPE:
+      return isSetEventType();
+    case DB_NAME:
+      return isSetDbName();
+    case TABLE_NAME:
+      return isSetTableName();
+    case MESSAGE:
+      return isSetMessage();
+    case MESSAGE_FORMAT:
+      return isSetMessageFormat();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NotificationEvent)
+      return this.equals((NotificationEvent)that);
+    return false;
+  }
+
+  public boolean equals(NotificationEvent that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_eventId = true;
+    boolean that_present_eventId = true;
+    if (this_present_eventId || that_present_eventId) {
+      if (!(this_present_eventId && that_present_eventId))
+        return false;
+      if (this.eventId != that.eventId)
+        return false;
+    }
+
+    boolean this_present_eventTime = true;
+    boolean that_present_eventTime = true;
+    if (this_present_eventTime || that_present_eventTime) {
+      if (!(this_present_eventTime && that_present_eventTime))
+        return false;
+      if (this.eventTime != that.eventTime)
+        return false;
+    }
+
+    boolean this_present_eventType = true && this.isSetEventType();
+    boolean that_present_eventType = true && that.isSetEventType();
+    if (this_present_eventType || that_present_eventType) {
+      if (!(this_present_eventType && that_present_eventType))
+        return false;
+      if (!this.eventType.equals(that.eventType))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    boolean this_present_messageFormat = true && this.isSetMessageFormat();
+    boolean that_present_messageFormat = true && that.isSetMessageFormat();
+    if (this_present_messageFormat || that_present_messageFormat) {
+      if (!(this_present_messageFormat && that_present_messageFormat))
+        return false;
+      if (!this.messageFormat.equals(that.messageFormat))
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_eventId = true;
+    list.add(present_eventId);
+    if (present_eventId)
+      list.add(eventId);
+
+    boolean present_eventTime = true;
+    list.add(present_eventTime);
+    if (present_eventTime)
+      list.add(eventTime);
+
+    boolean present_eventType = true && (isSetEventType());
+    list.add(present_eventType);
+    if (present_eventType)
+      list.add(eventType);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tableName = true && (isSetTableName());
+    list.add(present_tableName);
+    if (present_tableName)
+      list.add(tableName);
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    boolean present_messageFormat = true && (isSetMessageFormat());
+    list.add(present_messageFormat);
+    if (present_messageFormat)
+      list.add(messageFormat);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NotificationEvent other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetEventId()).compareTo(other.isSetEventId());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEventId()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.eventId, other.eventId);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetEventTime()).compareTo(other.isSetEventTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEventTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.eventTime, other.eventTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetEventType()).compareTo(other.isSetEventType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEventType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.eventType, other.eventType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMessageFormat()).compareTo(other.isSetMessageFormat());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessageFormat()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.messageFormat, other.messageFormat);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NotificationEvent(");
+    boolean first = true;
+
+    sb.append("eventId:");
+    sb.append(this.eventId);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("eventTime:");
+    sb.append(this.eventTime);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("eventType:");
+    if (this.eventType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.eventType);
+    }
+    first = false;
+    if (isSetDbName()) {
+      if (!first) sb.append(", ");
+      sb.append("dbName:");
+      if (this.dbName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.dbName);
+      }
+      first = false;
+    }
+    if (isSetTableName()) {
+      if (!first) sb.append(", ");
+      sb.append("tableName:");
+      if (this.tableName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.tableName);
+      }
+      first = false;
+    }
+    if (!first) sb.append(", ");
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    if (isSetMessageFormat()) {
+      if (!first) sb.append(", ");
+      sb.append("messageFormat:");
+      if (this.messageFormat == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.messageFormat);
+      }
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetEventId()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'eventId' is unset! Struct:" + toString());
+    }
+
+    if (!isSetEventTime()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'eventTime' is unset! Struct:" + toString());
+    }
+
+    if (!isSetEventType()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'eventType' is unset! Struct:" + toString());
+    }
+
+    if (!isSetMessage()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'message' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NotificationEventStandardSchemeFactory implements SchemeFactory {
+    public NotificationEventStandardScheme getScheme() {
+      return new NotificationEventStandardScheme();
+    }
+  }
+
+  private static class NotificationEventStandardScheme extends StandardScheme<NotificationEvent> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEvent struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // EVENT_ID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.eventId = iprot.readI64();
+              struct.setEventIdIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // EVENT_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.eventTime = iprot.readI32();
+              struct.setEventTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // EVENT_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.eventType = iprot.readString();
+              struct.setEventTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = iprot.readString();
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // MESSAGE_FORMAT
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.messageFormat = iprot.readString();
+              struct.setMessageFormatIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 8: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEvent struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(EVENT_ID_FIELD_DESC);
+      oprot.writeI64(struct.eventId);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(EVENT_TIME_FIELD_DESC);
+      oprot.writeI32(struct.eventTime);
+      oprot.writeFieldEnd();
+      if (struct.eventType != null) {
+        oprot.writeFieldBegin(EVENT_TYPE_FIELD_DESC);
+        oprot.writeString(struct.eventType);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        if (struct.isSetDbName()) {
+          oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+          oprot.writeString(struct.dbName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.tableName != null) {
+        if (struct.isSetTableName()) {
+          oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+          oprot.writeString(struct.tableName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      if (struct.messageFormat != null) {
+        if (struct.isSetMessageFormat()) {
+          oprot.writeFieldBegin(MESSAGE_FORMAT_FIELD_DESC);
+          oprot.writeString(struct.messageFormat);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NotificationEventTupleSchemeFactory implements SchemeFactory {
+    public NotificationEventTupleScheme getScheme() {
+      return new NotificationEventTupleScheme();
+    }
+  }
+
+  private static class NotificationEventTupleScheme extends TupleScheme<NotificationEvent> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEvent struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.eventId);
+      oprot.writeI32(struct.eventTime);
+      oprot.writeString(struct.eventType);
+      oprot.writeString(struct.message);
+      BitSet optionals = new BitSet();
+      if (struct.isSetDbName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTableName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetMessageFormat()) {
+        optionals.set(2);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(3);
+      }
+      oprot.writeBitSet(optionals, 4);
+      if (struct.isSetDbName()) {
+        oprot.writeString(struct.dbName);
+      }
+      if (struct.isSetTableName()) {
+        oprot.writeString(struct.tableName);
+      }
+      if (struct.isSetMessageFormat()) {
+        oprot.writeString(struct.messageFormat);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEvent struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.eventId = iprot.readI64();
+      struct.setEventIdIsSet(true);
+      struct.eventTime = iprot.readI32();
+      struct.setEventTimeIsSet(true);
+      struct.eventType = iprot.readString();
+      struct.setEventTypeIsSet(true);
+      struct.message = iprot.readString();
+      struct.setMessageIsSet(true);
+      BitSet incoming = iprot.readBitSet(4);
+      if (incoming.get(0)) {
+        struct.dbName = iprot.readString();
+        struct.setDbNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.tableName = iprot.readString();
+        struct.setTableNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.messageFormat = iprot.readString();
+        struct.setMessageFormatIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+
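
The private writeObject/readObject hooks above route Java serialization through TCompactProtocol, and the same pattern works for explicit serialization. A minimal sketch (illustrative only, not part of this commit; the eventType string and the epoch-seconds reading of eventTime are assumptions):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.thrift.protocol.TCompactProtocol;
import org.apache.thrift.transport.TIOStreamTransport;

public class NotificationEventRoundTrip {
  public static void main(String[] args) throws Exception {
    NotificationEvent out = new NotificationEvent(
        42L,                                        // eventId (required)
        (int) (System.currentTimeMillis() / 1000L), // eventTime (required; assumed epoch seconds)
        "CREATE_TABLE",                             // eventType (required; example value)
        "{}");                                      // message (required)
    out.setDbName("default"); // optional: only written when isSetDbName() is true

    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    // TCompactProtocol keeps the default StandardScheme; write() calls validate() first.
    out.write(new TCompactProtocol(new TIOStreamTransport(buf)));

    NotificationEvent in = new NotificationEvent();
    in.read(new TCompactProtocol(new TIOStreamTransport(
        new ByteArrayInputStream(buf.toByteArray()))));
    System.out.println(in.equals(out)); // true: required fields + set optionals round-trip
  }
}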

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
new file mode 100644
index 0000000..f016204
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventRequest.java
@@ -0,0 +1,490 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NotificationEventRequest implements org.apache.thrift.TBase<NotificationEventRequest, NotificationEventRequest._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEventRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventRequest");
+
+  private static final org.apache.thrift.protocol.TField LAST_EVENT_FIELD_DESC = new org.apache.thrift.protocol.TField("lastEvent", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField MAX_EVENTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxEvents", org.apache.thrift.protocol.TType.I32, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NotificationEventRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NotificationEventRequestTupleSchemeFactory());
+  }
+
+  private long lastEvent; // required
+  private int maxEvents; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    LAST_EVENT((short)1, "lastEvent"),
+    MAX_EVENTS((short)2, "maxEvents");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // LAST_EVENT
+          return LAST_EVENT;
+        case 2: // MAX_EVENTS
+          return MAX_EVENTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __LASTEVENT_ISSET_ID = 0;
+  private static final int __MAXEVENTS_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.MAX_EVENTS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.LAST_EVENT, new org.apache.thrift.meta_data.FieldMetaData("lastEvent", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.MAX_EVENTS, new org.apache.thrift.meta_data.FieldMetaData("maxEvents", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotificationEventRequest.class, metaDataMap);
+  }
+
+  public NotificationEventRequest() {
+  }
+
+  public NotificationEventRequest(
+    long lastEvent)
+  {
+    this();
+    this.lastEvent = lastEvent;
+    setLastEventIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NotificationEventRequest(NotificationEventRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.lastEvent = other.lastEvent;
+    this.maxEvents = other.maxEvents;
+  }
+
+  public NotificationEventRequest deepCopy() {
+    return new NotificationEventRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setLastEventIsSet(false);
+    this.lastEvent = 0;
+    setMaxEventsIsSet(false);
+    this.maxEvents = 0;
+  }
+
+  public long getLastEvent() {
+    return this.lastEvent;
+  }
+
+  public void setLastEvent(long lastEvent) {
+    this.lastEvent = lastEvent;
+    setLastEventIsSet(true);
+  }
+
+  public void unsetLastEvent() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LASTEVENT_ISSET_ID);
+  }
+
+  /** Returns true if field lastEvent is set (has been assigned a value) and false otherwise */
+  public boolean isSetLastEvent() {
+    return EncodingUtils.testBit(__isset_bitfield, __LASTEVENT_ISSET_ID);
+  }
+
+  public void setLastEventIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LASTEVENT_ISSET_ID, value);
+  }
+
+  public int getMaxEvents() {
+    return this.maxEvents;
+  }
+
+  public void setMaxEvents(int maxEvents) {
+    this.maxEvents = maxEvents;
+    setMaxEventsIsSet(true);
+  }
+
+  public void unsetMaxEvents() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXEVENTS_ISSET_ID);
+  }
+
+  /** Returns true if field maxEvents is set (has been assigned a value) and false otherwise */
+  public boolean isSetMaxEvents() {
+    return EncodingUtils.testBit(__isset_bitfield, __MAXEVENTS_ISSET_ID);
+  }
+
+  public void setMaxEventsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXEVENTS_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case LAST_EVENT:
+      if (value == null) {
+        unsetLastEvent();
+      } else {
+        setLastEvent((Long)value);
+      }
+      break;
+
+    case MAX_EVENTS:
+      if (value == null) {
+        unsetMaxEvents();
+      } else {
+        setMaxEvents((Integer)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case LAST_EVENT:
+      return getLastEvent();
+
+    case MAX_EVENTS:
+      return getMaxEvents();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case LAST_EVENT:
+      return isSetLastEvent();
+    case MAX_EVENTS:
+      return isSetMaxEvents();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NotificationEventRequest)
+      return this.equals((NotificationEventRequest)that);
+    return false;
+  }
+
+  public boolean equals(NotificationEventRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_lastEvent = true;
+    boolean that_present_lastEvent = true;
+    if (this_present_lastEvent || that_present_lastEvent) {
+      if (!(this_present_lastEvent && that_present_lastEvent))
+        return false;
+      if (this.lastEvent != that.lastEvent)
+        return false;
+    }
+
+    boolean this_present_maxEvents = true && this.isSetMaxEvents();
+    boolean that_present_maxEvents = true && that.isSetMaxEvents();
+    if (this_present_maxEvents || that_present_maxEvents) {
+      if (!(this_present_maxEvents && that_present_maxEvents))
+        return false;
+      if (this.maxEvents != that.maxEvents)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_lastEvent = true;
+    list.add(present_lastEvent);
+    if (present_lastEvent)
+      list.add(lastEvent);
+
+    boolean present_maxEvents = true && (isSetMaxEvents());
+    list.add(present_maxEvents);
+    if (present_maxEvents)
+      list.add(maxEvents);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NotificationEventRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetLastEvent()).compareTo(other.isSetLastEvent());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLastEvent()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lastEvent, other.lastEvent);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMaxEvents()).compareTo(other.isSetMaxEvents());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMaxEvents()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxEvents, other.maxEvents);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NotificationEventRequest(");
+    boolean first = true;
+
+    sb.append("lastEvent:");
+    sb.append(this.lastEvent);
+    first = false;
+    if (isSetMaxEvents()) {
+      if (!first) sb.append(", ");
+      sb.append("maxEvents:");
+      sb.append(this.maxEvents);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetLastEvent()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'lastEvent' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization bypasses the default constructor, so reset the isset bitfield before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NotificationEventRequestStandardSchemeFactory implements SchemeFactory {
+    public NotificationEventRequestStandardScheme getScheme() {
+      return new NotificationEventRequestStandardScheme();
+    }
+  }
+
+  private static class NotificationEventRequestStandardScheme extends StandardScheme<NotificationEventRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // LAST_EVENT
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.lastEvent = iprot.readI64();
+              struct.setLastEventIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // MAX_EVENTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.maxEvents = iprot.readI32();
+              struct.setMaxEventsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(LAST_EVENT_FIELD_DESC);
+      oprot.writeI64(struct.lastEvent);
+      oprot.writeFieldEnd();
+      if (struct.isSetMaxEvents()) {
+        oprot.writeFieldBegin(MAX_EVENTS_FIELD_DESC);
+        oprot.writeI32(struct.maxEvents);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NotificationEventRequestTupleSchemeFactory implements SchemeFactory {
+    public NotificationEventRequestTupleScheme getScheme() {
+      return new NotificationEventRequestTupleScheme();
+    }
+  }
+
+  private static class NotificationEventRequestTupleScheme extends TupleScheme<NotificationEventRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.lastEvent);
+      BitSet optionals = new BitSet();
+      if (struct.isSetMaxEvents()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMaxEvents()) {
+        oprot.writeI32(struct.maxEvents);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.lastEvent = iprot.readI64();
+      struct.setLastEventIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.maxEvents = iprot.readI32();
+        struct.setMaxEventsIsSet(true);
+      }
+    }
+  }
+
+}
+
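
For reference, a minimal usage sketch of the generated struct above, assuming only the API shown in this file (the event id and limit values are invented): lastEvent is the required field and travels through the constructor, while the optional maxEvents is only written to the wire once its setter has flipped the corresponding isset bit.

    // Hypothetical usage sketch; not part of the generated file.
    import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;

    public class NotificationEventRequestSketch {
      public static void main(String[] args) throws Exception {
        NotificationEventRequest req = new NotificationEventRequest(42L); // required lastEvent
        req.setMaxEvents(100);                     // optional field; sets __MAXEVENTS_ISSET_ID
        System.out.println(req.isSetMaxEvents());  // true only after the setter ran
        req.validate();                            // would throw TProtocolException if lastEvent were unset
        System.out.println(req);                   // NotificationEventRequest(lastEvent:42, maxEvents:100)
      }
    }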

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
new file mode 100644
index 0000000..9228c39
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class NotificationEventResponse implements org.apache.thrift.TBase<NotificationEventResponse, NotificationEventResponse._Fields>, java.io.Serializable, Cloneable, Comparable<NotificationEventResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("NotificationEventResponse");
+
+  private static final org.apache.thrift.protocol.TField EVENTS_FIELD_DESC = new org.apache.thrift.protocol.TField("events", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new NotificationEventResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new NotificationEventResponseTupleSchemeFactory());
+  }
+
+  private List<NotificationEvent> events; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    EVENTS((short)1, "events");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // EVENTS
+          return EVENTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.EVENTS, new org.apache.thrift.meta_data.FieldMetaData("events", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NotificationEvent.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(NotificationEventResponse.class, metaDataMap);
+  }
+
+  public NotificationEventResponse() {
+  }
+
+  public NotificationEventResponse(
+    List<NotificationEvent> events)
+  {
+    this();
+    this.events = events;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public NotificationEventResponse(NotificationEventResponse other) {
+    if (other.isSetEvents()) {
+      List<NotificationEvent> __this__events = new ArrayList<NotificationEvent>(other.events.size());
+      for (NotificationEvent other_element : other.events) {
+        __this__events.add(new NotificationEvent(other_element));
+      }
+      this.events = __this__events;
+    }
+  }
+
+  public NotificationEventResponse deepCopy() {
+    return new NotificationEventResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.events = null;
+  }
+
+  public int getEventsSize() {
+    return (this.events == null) ? 0 : this.events.size();
+  }
+
+  public java.util.Iterator<NotificationEvent> getEventsIterator() {
+    return (this.events == null) ? null : this.events.iterator();
+  }
+
+  public void addToEvents(NotificationEvent elem) {
+    if (this.events == null) {
+      this.events = new ArrayList<NotificationEvent>();
+    }
+    this.events.add(elem);
+  }
+
+  public List<NotificationEvent> getEvents() {
+    return this.events;
+  }
+
+  public void setEvents(List<NotificationEvent> events) {
+    this.events = events;
+  }
+
+  public void unsetEvents() {
+    this.events = null;
+  }
+
+  /** Returns true if field events is set (has been assigned a value) and false otherwise */
+  public boolean isSetEvents() {
+    return this.events != null;
+  }
+
+  public void setEventsIsSet(boolean value) {
+    if (!value) {
+      this.events = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case EVENTS:
+      if (value == null) {
+        unsetEvents();
+      } else {
+        setEvents((List<NotificationEvent>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case EVENTS:
+      return getEvents();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case EVENTS:
+      return isSetEvents();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof NotificationEventResponse)
+      return this.equals((NotificationEventResponse)that);
+    return false;
+  }
+
+  public boolean equals(NotificationEventResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_events = true && this.isSetEvents();
+    boolean that_present_events = true && that.isSetEvents();
+    if (this_present_events || that_present_events) {
+      if (!(this_present_events && that_present_events))
+        return false;
+      if (!this.events.equals(that.events))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_events = true && (isSetEvents());
+    list.add(present_events);
+    if (present_events)
+      list.add(events);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(NotificationEventResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetEvents()).compareTo(other.isSetEvents());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetEvents()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.events, other.events);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("NotificationEventResponse(");
+    boolean first = true;
+
+    sb.append("events:");
+    if (this.events == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.events);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetEvents()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'events' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class NotificationEventResponseStandardSchemeFactory implements SchemeFactory {
+    public NotificationEventResponseStandardScheme getScheme() {
+      return new NotificationEventResponseStandardScheme();
+    }
+  }
+
+  private static class NotificationEventResponseStandardScheme extends StandardScheme<NotificationEventResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // EVENTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list724 = iprot.readListBegin();
+                struct.events = new ArrayList<NotificationEvent>(_list724.size);
+                NotificationEvent _elem725;
+                for (int _i726 = 0; _i726 < _list724.size; ++_i726)
+                {
+                  _elem725 = new NotificationEvent();
+                  _elem725.read(iprot);
+                  struct.events.add(_elem725);
+                }
+                iprot.readListEnd();
+              }
+              struct.setEventsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.events != null) {
+        oprot.writeFieldBegin(EVENTS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size()));
+          for (NotificationEvent _iter727 : struct.events)
+          {
+            _iter727.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class NotificationEventResponseTupleSchemeFactory implements SchemeFactory {
+    public NotificationEventResponseTupleScheme getScheme() {
+      return new NotificationEventResponseTupleScheme();
+    }
+  }
+
+  private static class NotificationEventResponseTupleScheme extends TupleScheme<NotificationEventResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.events.size());
+        for (NotificationEvent _iter728 : struct.events)
+        {
+          _iter728.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list729 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.events = new ArrayList<NotificationEvent>(_list729.size);
+        NotificationEvent _elem730;
+        for (int _i731 = 0; _i731 < _list729.size; ++_i731)
+        {
+          _elem730 = new NotificationEvent();
+          _elem730.read(iprot);
+          struct.events.add(_elem730);
+        }
+      }
+      struct.setEventsIsSet(true);
+    }
+  }
+
+}
+
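
A quick round-trip sketch for the struct above. It assumes the stock libthrift 0.9.3 TSerializer/TDeserializer helpers and the NotificationEvent bean generated elsewhere in this patch (the setters used below follow the usual Thrift bean pattern; the field values are invented), and it exercises the same StandardScheme read/write paths defined in this file:

    import org.apache.hadoop.hive.metastore.api.NotificationEvent;
    import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    public class NotificationEventResponseSketch {
      public static void main(String[] args) throws Exception {
        NotificationEvent ev = new NotificationEvent();
        ev.setEventId(43L);
        ev.setEventTime((int) (System.currentTimeMillis() / 1000L));
        ev.setEventType("CREATE_TABLE");
        ev.setMessage("{}");                       // placeholder; real payloads come from a message factory

        NotificationEventResponse resp = new NotificationEventResponse();
        resp.addToEvents(ev);                      // lazily allocates the required events list

        byte[] wire = new TSerializer(new TCompactProtocol.Factory()).serialize(resp);
        NotificationEventResponse copy = new NotificationEventResponse();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, wire);
        System.out.println(copy.getEventsSize());  // 1
      }
    }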


[78/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
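
The diff below regenerates the Python binding for the full ThriftHiveMetastore service: every RPC declared on Iface becomes a blocking send_*/recv_* pair on Client, mirroring the generated Java client. For orientation, a minimal Java-side sketch of one such RPC, get_next_notification, which carries the NotificationEventRequest/Response structs from earlier in this series (host, port, and the starting event id are placeholders; the wiring is the standard Thrift client pattern, not something introduced by this patch):

    import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
    import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;

    public class GetNextNotificationSketch {
      public static void main(String[] args) throws Exception {
        TSocket transport = new TSocket("localhost", 9083);  // placeholder metastore endpoint
        transport.open();
        try {
          ThriftHiveMetastore.Client client =
              new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
          NotificationEventRequest req = new NotificationEventRequest(0L); // fetch from the beginning
          req.setMaxEvents(10);
          NotificationEventResponse resp = client.get_next_notification(req);
          System.out.println("fetched " + resp.getEventsSize() + " events");
        } finally {
          transport.close();
        }
      }
    }
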
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
index 0000000,a5bcc10..0af7238
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py
@@@ -1,0 -1,48956 +1,49183 @@@
+ #
+ # Autogenerated by Thrift Compiler (0.9.3)
+ #
+ # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ #
+ #  options string: py
+ #
+ 
+ from thrift.Thrift import TType, TMessageType, TException, TApplicationException
+ import fb303.FacebookService
+ import logging
+ from ttypes import *
+ from thrift.Thrift import TProcessor
+ from thrift.transport import TTransport
+ from thrift.protocol import TBinaryProtocol, TProtocol
+ try:
+   from thrift.protocol import fastbinary
+ except ImportError:
+   fastbinary = None
+ 
+ 
+ class Iface(fb303.FacebookService.Iface):
+   """
+   This interface is live.
+   """
+   def getMetaConf(self, key):
+     """
+     Parameters:
+      - key
+     """
+     pass
+ 
+   def setMetaConf(self, key, value):
+     """
+     Parameters:
+      - key
+      - value
+     """
+     pass
+ 
+   def create_catalog(self, catalog):
+     """
+     Parameters:
+      - catalog
+     """
+     pass
+ 
+   def alter_catalog(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def get_catalog(self, catName):
+     """
+     Parameters:
+      - catName
+     """
+     pass
+ 
+   def get_catalogs(self):
+     pass
+ 
+   def drop_catalog(self, catName):
+     """
+     Parameters:
+      - catName
+     """
+     pass
+ 
+   def create_database(self, database):
+     """
+     Parameters:
+      - database
+     """
+     pass
+ 
+   def get_database(self, name):
+     """
+     Parameters:
+      - name
+     """
+     pass
+ 
+   def drop_database(self, name, deleteData, cascade):
+     """
+     Parameters:
+      - name
+      - deleteData
+      - cascade
+     """
+     pass
+ 
+   def get_databases(self, pattern):
+     """
+     Parameters:
+      - pattern
+     """
+     pass
+ 
+   def get_all_databases(self):
+     pass
+ 
+   def alter_database(self, dbname, db):
+     """
+     Parameters:
+      - dbname
+      - db
+     """
+     pass
+ 
+   def get_type(self, name):
+     """
+     Parameters:
+      - name
+     """
+     pass
+ 
+   def create_type(self, type):
+     """
+     Parameters:
+      - type
+     """
+     pass
+ 
+   def drop_type(self, type):
+     """
+     Parameters:
+      - type
+     """
+     pass
+ 
+   def get_type_all(self, name):
+     """
+     Parameters:
+      - name
+     """
+     pass
+ 
+   def get_fields(self, db_name, table_name):
+     """
+     Parameters:
+      - db_name
+      - table_name
+     """
+     pass
+ 
+   def get_fields_with_environment_context(self, db_name, table_name, environment_context):
+     """
+     Parameters:
+      - db_name
+      - table_name
+      - environment_context
+     """
+     pass
+ 
+   def get_schema(self, db_name, table_name):
+     """
+     Parameters:
+      - db_name
+      - table_name
+     """
+     pass
+ 
+   def get_schema_with_environment_context(self, db_name, table_name, environment_context):
+     """
+     Parameters:
+      - db_name
+      - table_name
+      - environment_context
+     """
+     pass
+ 
+   def create_table(self, tbl):
+     """
+     Parameters:
+      - tbl
+     """
+     pass
+ 
+   def create_table_with_environment_context(self, tbl, environment_context):
+     """
+     Parameters:
+      - tbl
+      - environment_context
+     """
+     pass
+ 
+   def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints):
+     """
+     Parameters:
+      - tbl
+      - primaryKeys
+      - foreignKeys
+      - uniqueConstraints
+      - notNullConstraints
+      - defaultConstraints
+      - checkConstraints
+     """
+     pass
+ 
+   def drop_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def add_primary_key(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def add_foreign_key(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def add_unique_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def add_not_null_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def add_default_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def add_check_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def drop_table(self, dbname, name, deleteData):
+     """
+     Parameters:
+      - dbname
+      - name
+      - deleteData
+     """
+     pass
+ 
+   def drop_table_with_environment_context(self, dbname, name, deleteData, environment_context):
+     """
+     Parameters:
+      - dbname
+      - name
+      - deleteData
+      - environment_context
+     """
+     pass
+ 
+   def truncate_table(self, dbName, tableName, partNames):
+     """
+     Parameters:
+      - dbName
+      - tableName
+      - partNames
+     """
+     pass
+ 
+   def get_tables(self, db_name, pattern):
+     """
+     Parameters:
+      - db_name
+      - pattern
+     """
+     pass
+ 
+   def get_tables_by_type(self, db_name, pattern, tableType):
+     """
+     Parameters:
+      - db_name
+      - pattern
+      - tableType
+     """
+     pass
+ 
+   def get_materialized_views_for_rewriting(self, db_name):
+     """
+     Parameters:
+      - db_name
+     """
+     pass
+ 
+   def get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
+     """
+     Parameters:
+      - db_patterns
+      - tbl_patterns
+      - tbl_types
+     """
+     pass
+ 
+   def get_all_tables(self, db_name):
+     """
+     Parameters:
+      - db_name
+     """
+     pass
+ 
+   def get_table(self, dbname, tbl_name):
+     """
+     Parameters:
+      - dbname
+      - tbl_name
+     """
+     pass
+ 
+   def get_table_objects_by_name(self, dbname, tbl_names):
+     """
+     Parameters:
+      - dbname
+      - tbl_names
+     """
+     pass
+ 
+   def get_table_req(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def get_table_objects_by_name_req(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def get_materialization_invalidation_info(self, dbname, tbl_names):
+     """
+     Parameters:
+      - dbname
+      - tbl_names
+     """
+     pass
+ 
+   def update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata):
+     """
+     Parameters:
+      - catName
+      - dbname
+      - tbl_name
+      - creation_metadata
+     """
+     pass
+ 
+   def get_table_names_by_filter(self, dbname, filter, max_tables):
+     """
+     Parameters:
+      - dbname
+      - filter
+      - max_tables
+     """
+     pass
+ 
+   def alter_table(self, dbname, tbl_name, new_tbl):
+     """
+     Parameters:
+      - dbname
+      - tbl_name
+      - new_tbl
+     """
+     pass
+ 
+   def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context):
+     """
+     Parameters:
+      - dbname
+      - tbl_name
+      - new_tbl
+      - environment_context
+     """
+     pass
+ 
+   def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade):
+     """
+     Parameters:
+      - dbname
+      - tbl_name
+      - new_tbl
+      - cascade
+     """
+     pass
+ 
+   def add_partition(self, new_part):
+     """
+     Parameters:
+      - new_part
+     """
+     pass
+ 
+   def add_partition_with_environment_context(self, new_part, environment_context):
+     """
+     Parameters:
+      - new_part
+      - environment_context
+     """
+     pass
+ 
+   def add_partitions(self, new_parts):
+     """
+     Parameters:
+      - new_parts
+     """
+     pass
+ 
+   def add_partitions_pspec(self, new_parts):
+     """
+     Parameters:
+      - new_parts
+     """
+     pass
+ 
+   def append_partition(self, db_name, tbl_name, part_vals):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+     """
+     pass
+ 
+   def add_partitions_req(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def append_partition_with_environment_context(self, db_name, tbl_name, part_vals, environment_context):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - environment_context
+     """
+     pass
+ 
+   def append_partition_by_name(self, db_name, tbl_name, part_name):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_name
+     """
+     pass
+ 
+   def append_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, environment_context):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_name
+      - environment_context
+     """
+     pass
+ 
+   def drop_partition(self, db_name, tbl_name, part_vals, deleteData):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - deleteData
+     """
+     pass
+ 
+   def drop_partition_with_environment_context(self, db_name, tbl_name, part_vals, deleteData, environment_context):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - deleteData
+      - environment_context
+     """
+     pass
+ 
+   def drop_partition_by_name(self, db_name, tbl_name, part_name, deleteData):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_name
+      - deleteData
+     """
+     pass
+ 
+   def drop_partition_by_name_with_environment_context(self, db_name, tbl_name, part_name, deleteData, environment_context):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_name
+      - deleteData
+      - environment_context
+     """
+     pass
+ 
+   def drop_partitions_req(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def get_partition(self, db_name, tbl_name, part_vals):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+     """
+     pass
+ 
+   def exchange_partition(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+     """
+     Parameters:
+      - partitionSpecs
+      - source_db
+      - source_table_name
+      - dest_db
+      - dest_table_name
+     """
+     pass
+ 
+   def exchange_partitions(self, partitionSpecs, source_db, source_table_name, dest_db, dest_table_name):
+     """
+     Parameters:
+      - partitionSpecs
+      - source_db
+      - source_table_name
+      - dest_db
+      - dest_table_name
+     """
+     pass
+ 
+   def get_partition_with_auth(self, db_name, tbl_name, part_vals, user_name, group_names):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - user_name
+      - group_names
+     """
+     pass
+ 
+   def get_partition_by_name(self, db_name, tbl_name, part_name):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_name
+     """
+     pass
+ 
+   def get_partitions(self, db_name, tbl_name, max_parts):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - max_parts
+     """
+     pass
+ 
+   def get_partitions_with_auth(self, db_name, tbl_name, max_parts, user_name, group_names):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - max_parts
+      - user_name
+      - group_names
+     """
+     pass
+ 
+   def get_partitions_pspec(self, db_name, tbl_name, max_parts):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - max_parts
+     """
+     pass
+ 
+   def get_partition_names(self, db_name, tbl_name, max_parts):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - max_parts
+     """
+     pass
+ 
+   def get_partition_values(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_partitions_ps(self, db_name, tbl_name, part_vals, max_parts):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - max_parts
+     """
+     pass
+ 
+   def get_partitions_ps_with_auth(self, db_name, tbl_name, part_vals, max_parts, user_name, group_names):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - max_parts
+      - user_name
+      - group_names
+     """
+     pass
+ 
+   def get_partition_names_ps(self, db_name, tbl_name, part_vals, max_parts):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - max_parts
+     """
+     pass
+ 
+   def get_partitions_by_filter(self, db_name, tbl_name, filter, max_parts):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - filter
+      - max_parts
+     """
+     pass
+ 
+   def get_part_specs_by_filter(self, db_name, tbl_name, filter, max_parts):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - filter
+      - max_parts
+     """
+     pass
+ 
+   def get_partitions_by_expr(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def get_num_partitions_by_filter(self, db_name, tbl_name, filter):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - filter
+     """
+     pass
+ 
+   def get_partitions_by_names(self, db_name, tbl_name, names):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - names
+     """
+     pass
+ 
+   def alter_partition(self, db_name, tbl_name, new_part):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - new_part
+     """
+     pass
+ 
+   def alter_partitions(self, db_name, tbl_name, new_parts):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - new_parts
+     """
+     pass
+ 
+   def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - new_parts
+      - environment_context
+     """
+     pass
+ 
++  def alter_partitions_with_environment_context_req(self, req):
++    """
++    Parameters:
++     - req
++    """
++    pass
++
+   def alter_partition_with_environment_context(self, db_name, tbl_name, new_part, environment_context):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - new_part
+      - environment_context
+     """
+     pass
+ 
+   def rename_partition(self, db_name, tbl_name, part_vals, new_part):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - new_part
+     """
+     pass
+ 
+   def partition_name_has_valid_characters(self, part_vals, throw_exception):
+     """
+     Parameters:
+      - part_vals
+      - throw_exception
+     """
+     pass
+ 
+   def get_config_value(self, name, defaultValue):
+     """
+     Parameters:
+      - name
+      - defaultValue
+     """
+     pass
+ 
+   def partition_name_to_vals(self, part_name):
+     """
+     Parameters:
+      - part_name
+     """
+     pass
+ 
+   def partition_name_to_spec(self, part_name):
+     """
+     Parameters:
+      - part_name
+     """
+     pass
+ 
+   def markPartitionForEvent(self, db_name, tbl_name, part_vals, eventType):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - eventType
+     """
+     pass
+ 
+   def isPartitionMarkedForEvent(self, db_name, tbl_name, part_vals, eventType):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_vals
+      - eventType
+     """
+     pass
+ 
+   def get_primary_keys(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_foreign_keys(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_unique_constraints(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_not_null_constraints(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_default_constraints(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_check_constraints(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def update_table_column_statistics(self, stats_obj):
+     """
+     Parameters:
+      - stats_obj
+     """
+     pass
+ 
+   def update_partition_column_statistics(self, stats_obj):
+     """
+     Parameters:
+      - stats_obj
+     """
+     pass
+ 
+   def get_table_column_statistics(self, db_name, tbl_name, col_name):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - col_name
+     """
+     pass
+ 
+   def get_partition_column_statistics(self, db_name, tbl_name, part_name, col_name):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_name
+      - col_name
+     """
+     pass
+ 
+   def get_table_statistics_req(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_partitions_statistics_req(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_aggr_stats_for(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def set_aggr_stats_for(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - part_name
+      - col_name
+     """
+     pass
+ 
+   def delete_table_column_statistics(self, db_name, tbl_name, col_name):
+     """
+     Parameters:
+      - db_name
+      - tbl_name
+      - col_name
+     """
+     pass
+ 
+   def create_function(self, func):
+     """
+     Parameters:
+      - func
+     """
+     pass
+ 
+   def drop_function(self, dbName, funcName):
+     """
+     Parameters:
+      - dbName
+      - funcName
+     """
+     pass
+ 
+   def alter_function(self, dbName, funcName, newFunc):
+     """
+     Parameters:
+      - dbName
+      - funcName
+      - newFunc
+     """
+     pass
+ 
+   def get_functions(self, dbName, pattern):
+     """
+     Parameters:
+      - dbName
+      - pattern
+     """
+     pass
+ 
+   def get_function(self, dbName, funcName):
+     """
+     Parameters:
+      - dbName
+      - funcName
+     """
+     pass
+ 
+   def get_all_functions(self):
+     pass
+ 
+   def create_role(self, role):
+     """
+     Parameters:
+      - role
+     """
+     pass
+ 
+   def drop_role(self, role_name):
+     """
+     Parameters:
+      - role_name
+     """
+     pass
+ 
+   def get_role_names(self):
+     pass
+ 
+   def grant_role(self, role_name, principal_name, principal_type, grantor, grantorType, grant_option):
+     """
+     Parameters:
+      - role_name
+      - principal_name
+      - principal_type
+      - grantor
+      - grantorType
+      - grant_option
+     """
+     pass
+ 
+   def revoke_role(self, role_name, principal_name, principal_type):
+     """
+     Parameters:
+      - role_name
+      - principal_name
+      - principal_type
+     """
+     pass
+ 
+   def list_roles(self, principal_name, principal_type):
+     """
+     Parameters:
+      - principal_name
+      - principal_type
+     """
+     pass
+ 
+   def grant_revoke_role(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_principals_in_role(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_role_grants_for_principal(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_privilege_set(self, hiveObject, user_name, group_names):
+     """
+     Parameters:
+      - hiveObject
+      - user_name
+      - group_names
+     """
+     pass
+ 
+   def list_privileges(self, principal_name, principal_type, hiveObject):
+     """
+     Parameters:
+      - principal_name
+      - principal_type
+      - hiveObject
+     """
+     pass
+ 
+   def grant_privileges(self, privileges):
+     """
+     Parameters:
+      - privileges
+     """
+     pass
+ 
+   def revoke_privileges(self, privileges):
+     """
+     Parameters:
+      - privileges
+     """
+     pass
+ 
+   def grant_revoke_privileges(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def refresh_privileges(self, objToRefresh, authorizer, grantRequest):
+     """
+     Parameters:
+      - objToRefresh
+      - authorizer
+      - grantRequest
+     """
+     pass
+ 
+   def set_ugi(self, user_name, group_names):
+     """
+     Parameters:
+      - user_name
+      - group_names
+     """
+     pass
+ 
+   def get_delegation_token(self, token_owner, renewer_kerberos_principal_name):
+     """
+     Parameters:
+      - token_owner
+      - renewer_kerberos_principal_name
+     """
+     pass
+ 
+   def renew_delegation_token(self, token_str_form):
+     """
+     Parameters:
+      - token_str_form
+     """
+     pass
+ 
+   def cancel_delegation_token(self, token_str_form):
+     """
+     Parameters:
+      - token_str_form
+     """
+     pass
+ 
+   def add_token(self, token_identifier, delegation_token):
+     """
+     Parameters:
+      - token_identifier
+      - delegation_token
+     """
+     pass
+ 
+   def remove_token(self, token_identifier):
+     """
+     Parameters:
+      - token_identifier
+     """
+     pass
+ 
+   def get_token(self, token_identifier):
+     """
+     Parameters:
+      - token_identifier
+     """
+     pass
+ 
+   def get_all_token_identifiers(self):
+     pass
+ 
+   def add_master_key(self, key):
+     """
+     Parameters:
+      - key
+     """
+     pass
+ 
+   def update_master_key(self, seq_number, key):
+     """
+     Parameters:
+      - seq_number
+      - key
+     """
+     pass
+ 
+   def remove_master_key(self, key_seq):
+     """
+     Parameters:
+      - key_seq
+     """
+     pass
+ 
+   def get_master_keys(self):
+     pass
+ 
+   def get_open_txns(self):
+     pass
+ 
+   def get_open_txns_info(self):
+     pass
+ 
+   def open_txns(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def abort_txn(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def abort_txns(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def commit_txn(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def repl_tbl_writeid_state(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def get_valid_write_ids(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def allocate_table_write_ids(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def lock(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def check_lock(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def unlock(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def show_locks(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def heartbeat(self, ids):
+     """
+     Parameters:
+      - ids
+     """
+     pass
+ 
+   def heartbeat_txn_range(self, txns):
+     """
+     Parameters:
+      - txns
+     """
+     pass
+ 
+   def compact(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def compact2(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def show_compact(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def add_dynamic_partitions(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def get_next_notification(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def get_current_notificationEventId(self):
+     pass
+ 
+   def get_notification_events_count(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def fire_listener_event(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def flushCache(self):
+     pass
+ 
+   def add_write_notification_log(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def cm_recycle(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_file_metadata_by_expr(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def get_file_metadata(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def put_file_metadata(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def clear_file_metadata(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def cache_file_metadata(self, req):
+     """
+     Parameters:
+      - req
+     """
+     pass
+ 
+   def get_metastore_db_uuid(self):
+     pass
+ 
+   def create_resource_plan(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_resource_plan(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_active_resource_plan(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_all_resource_plans(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def alter_resource_plan(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def validate_resource_plan(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def drop_resource_plan(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def create_wm_trigger(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def alter_wm_trigger(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def drop_wm_trigger(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def get_triggers_for_resourceplan(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def create_wm_pool(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def alter_wm_pool(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def drop_wm_pool(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def create_or_update_wm_mapping(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def drop_wm_mapping(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def create_or_drop_wm_trigger_to_pool_mapping(self, request):
+     """
+     Parameters:
+      - request
+     """
+     pass
+ 
+   def create_ischema(self, schema):
+     """
+     Parameters:
+      - schema
+     """
+     pass
+ 
+   def alter_ischema(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def get_ischema(self, name):
+     """
+     Parameters:
+      - name
+     """
+     pass
+ 
+   def drop_ischema(self, name):
+     """
+     Parameters:
+      - name
+     """
+     pass
+ 
+   def add_schema_version(self, schemaVersion):
+     """
+     Parameters:
+      - schemaVersion
+     """
+     pass
+ 
+   def get_schema_version(self, schemaVersion):
+     """
+     Parameters:
+      - schemaVersion
+     """
+     pass
+ 
+   def get_schema_latest_version(self, schemaName):
+     """
+     Parameters:
+      - schemaName
+     """
+     pass
+ 
+   def get_schema_all_versions(self, schemaName):
+     """
+     Parameters:
+      - schemaName
+     """
+     pass
+ 
+   def drop_schema_version(self, schemaVersion):
+     """
+     Parameters:
+      - schemaVersion
+     """
+     pass
+ 
+   def get_schemas_by_cols(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def map_schema_version_to_serde(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def set_schema_version_state(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def add_serde(self, serde):
+     """
+     Parameters:
+      - serde
+     """
+     pass
+ 
+   def get_serde(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+   def get_lock_materialization_rebuild(self, dbName, tableName, txnId):
+     """
+     Parameters:
+      - dbName
+      - tableName
+      - txnId
+     """
+     pass
+ 
+   def heartbeat_lock_materialization_rebuild(self, dbName, tableName, txnId):
+     """
+     Parameters:
+      - dbName
+      - tableName
+      - txnId
+     """
+     pass
+ 
+   def add_runtime_stats(self, stat):
+     """
+     Parameters:
+      - stat
+     """
+     pass
+ 
+   def get_runtime_stats(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     pass
+ 
+ 
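The pass stubs above close out the Iface definition. On the server side, Thrift pairs this interface with the generated Processor: a handler subclasses Iface, overrides the calls it supports, and the Processor dispatches incoming messages to it. A minimal, hypothetical sketch (UuidOnlyHandler and its behavior are illustrative, not part of the generated file):

    import uuid

    # Hypothetical handler: implements a single Iface method; every other
    # call would fall through to the inherited `pass` stubs above.
    class UuidOnlyHandler(Iface):
      def get_metastore_db_uuid(self):
        # Return a fresh identifier for this toy metastore instance.
        return str(uuid.uuid4())

    # A real server would wrap the handler in the generated Processor, e.g.
    #   processor = Processor(UuidOnlyHandler())
    # and serve it via thrift.server.TServer.TSimpleServer(processor, ...).
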
+ class Client(fb303.FacebookService.Client, Iface):
+   """
+   This interface is live.
+   """
+   def __init__(self, iprot, oprot=None):
+     fb303.FacebookService.Client.__init__(self, iprot, oprot)
+ 
+   def getMetaConf(self, key):
+     """
+     Parameters:
+      - key
+     """
+     self.send_getMetaConf(key)
+     return self.recv_getMetaConf()
+ 
+   def send_getMetaConf(self, key):
+     self._oprot.writeMessageBegin('getMetaConf', TMessageType.CALL, self._seqid)
+     args = getMetaConf_args()
+     args.key = key
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_getMetaConf(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = getMetaConf_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "getMetaConf failed: unknown result")
+ 
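Every client call below repeats the getMetaConf pattern above: send_* writes the args struct and flushes the transport, and recv_* reads the result struct, re-raises any declared exception field that came back set, and otherwise returns success (or raises MISSING_RESULT for calls that must produce a value). A minimal, hypothetical round-trip, assuming a metastore on the default port 9083 with SASL disabled and the usual gen-py packaging:

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore  # import path depends on packaging

    # Buffered binary protocol over a plain socket; secured deployments layer
    # SASL/Kerberos on top of this transport instead.
    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
    client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    try:
      # One synchronous round-trip: send_getMetaConf then recv_getMetaConf.
      print(client.getMetaConf('metastore.batch.retrieve.max'))
    finally:
      transport.close()
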
+   def setMetaConf(self, key, value):
+     """
+     Parameters:
+      - key
+      - value
+     """
+     self.send_setMetaConf(key, value)
+     self.recv_setMetaConf()
+ 
+   def send_setMetaConf(self, key, value):
+     self._oprot.writeMessageBegin('setMetaConf', TMessageType.CALL, self._seqid)
+     args = setMetaConf_args()
+     args.key = key
+     args.value = value
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_setMetaConf(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = setMetaConf_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     return
+ 
+   def create_catalog(self, catalog):
+     """
+     Parameters:
+      - catalog
+     """
+     self.send_create_catalog(catalog)
+     self.recv_create_catalog()
+ 
+   def send_create_catalog(self, catalog):
+     self._oprot.writeMessageBegin('create_catalog', TMessageType.CALL, self._seqid)
+     args = create_catalog_args()
+     args.catalog = catalog
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_create_catalog(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = create_catalog_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     return
+ 
+   def alter_catalog(self, rqst):
+     """
+     Parameters:
+      - rqst
+     """
+     self.send_alter_catalog(rqst)
+     self.recv_alter_catalog()
+ 
+   def send_alter_catalog(self, rqst):
+     self._oprot.writeMessageBegin('alter_catalog', TMessageType.CALL, self._seqid)
+     args = alter_catalog_args()
+     args.rqst = rqst
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_alter_catalog(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = alter_catalog_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     return
+ 
+   def get_catalog(self, catName):
+     """
+     Parameters:
+      - catName
+     """
+     self.send_get_catalog(catName)
+     return self.recv_get_catalog()
+ 
+   def send_get_catalog(self, catName):
+     self._oprot.writeMessageBegin('get_catalog', TMessageType.CALL, self._seqid)
+     args = get_catalog_args()
+     args.catName = catName
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_catalog(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_catalog_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_catalog failed: unknown result")
+ 
+   def get_catalogs(self):
+     self.send_get_catalogs()
+     return self.recv_get_catalogs()
+ 
+   def send_get_catalogs(self):
+     self._oprot.writeMessageBegin('get_catalogs', TMessageType.CALL, self._seqid)
+     args = get_catalogs_args()
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_catalogs(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_catalogs_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_catalogs failed: unknown result")
+ 
+   def drop_catalog(self, catName):
+     """
+     Parameters:
+      - catName
+     """
+     self.send_drop_catalog(catName)
+     self.recv_drop_catalog()
+ 
+   def send_drop_catalog(self, catName):
+     self._oprot.writeMessageBegin('drop_catalog', TMessageType.CALL, self._seqid)
+     args = drop_catalog_args()
+     args.catName = catName
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_drop_catalog(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = drop_catalog_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     return
+ 
+   def create_database(self, database):
+     """
+     Parameters:
+      - database
+     """
+     self.send_create_database(database)
+     self.recv_create_database()
+ 
+   def send_create_database(self, database):
+     self._oprot.writeMessageBegin('create_database', TMessageType.CALL, self._seqid)
+     args = create_database_args()
+     args.database = database
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_create_database(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = create_database_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     return
+ 
+   def get_database(self, name):
+     """
+     Parameters:
+      - name
+     """
+     self.send_get_database(name)
+     return self.recv_get_database()
+ 
+   def send_get_database(self, name):
+     self._oprot.writeMessageBegin('get_database', TMessageType.CALL, self._seqid)
+     args = get_database_args()
+     args.name = name
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_database(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_database_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_database failed: unknown result")
+ 
+   def drop_database(self, name, deleteData, cascade):
+     """
+     Parameters:
+      - name
+      - deleteData
+      - cascade
+     """
+     self.send_drop_database(name, deleteData, cascade)
+     self.recv_drop_database()
+ 
+   def send_drop_database(self, name, deleteData, cascade):
+     self._oprot.writeMessageBegin('drop_database', TMessageType.CALL, self._seqid)
+     args = drop_database_args()
+     args.name = name
+     args.deleteData = deleteData
+     args.cascade = cascade
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_drop_database(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = drop_database_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     return
+ 
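For the calls that take bare positional flags rather than a request struct, argument order matters: drop_database above is (name, deleteData, cascade), where deleteData also removes the warehouse files and cascade first drops any tables the database still contains. Continuing the hypothetical client from the getMetaConf sketch:

    # Assumes `client` from the earlier hypothetical sketch is still open.
    # Drops the database, deletes its warehouse data, and cascades through
    # any tables it contains.
    client.drop_database('scratch_db', True, True)
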
+   def get_databases(self, pattern):
+     """
+     Parameters:
+      - pattern
+     """
+     self.send_get_databases(pattern)
+     return self.recv_get_databases()
+ 
+   def send_get_databases(self, pattern):
+     self._oprot.writeMessageBegin('get_databases', TMessageType.CALL, self._seqid)
+     args = get_databases_args()
+     args.pattern = pattern
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_databases(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_databases_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_databases failed: unknown result")
+ 
+   def get_all_databases(self):
+     self.send_get_all_databases()
+     return self.recv_get_all_databases()
+ 
+   def send_get_all_databases(self):
+     self._oprot.writeMessageBegin('get_all_databases', TMessageType.CALL, self._seqid)
+     args = get_all_databases_args()
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_all_databases(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_all_databases_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_databases failed: unknown result")
+ 
+   def alter_database(self, dbname, db):
+     """
+     Parameters:
+      - dbname
+      - db
+     """
+     self.send_alter_database(dbname, db)
+     self.recv_alter_database()
+ 
+   def send_alter_database(self, dbname, db):
+     self._oprot.writeMessageBegin('alter_database', TMessageType.CALL, self._seqid)
+     args = alter_database_args()
+     args.dbname = dbname
+     args.db = db
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_alter_database(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = alter_database_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def get_type(self, name):
+     """
+     Parameters:
+      - name
+     """
+     self.send_get_type(name)
+     return self.recv_get_type()
+ 
+   def send_get_type(self, name):
+     self._oprot.writeMessageBegin('get_type', TMessageType.CALL, self._seqid)
+     args = get_type_args()
+     args.name = name
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_type(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_type_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_type failed: unknown result")
+ 
+   def create_type(self, type):
+     """
+     Parameters:
+      - type
+     """
+     self.send_create_type(type)
+     return self.recv_create_type()
+ 
+   def send_create_type(self, type):
+     self._oprot.writeMessageBegin('create_type', TMessageType.CALL, self._seqid)
+     args = create_type_args()
+     args.type = type
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_create_type(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = create_type_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "create_type failed: unknown result")
+ 
+   def drop_type(self, type):
+     """
+     Parameters:
+      - type
+     """
+     self.send_drop_type(type)
+     return self.recv_drop_type()
+ 
+   def send_drop_type(self, type):
+     self._oprot.writeMessageBegin('drop_type', TMessageType.CALL, self._seqid)
+     args = drop_type_args()
+     args.type = type
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_drop_type(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = drop_type_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "drop_type failed: unknown result")
+ 
+   def get_type_all(self, name):
+     """
+     Parameters:
+      - name
+     """
+     self.send_get_type_all(name)
+     return self.recv_get_type_all()
+ 
+   def send_get_type_all(self, name):
+     self._oprot.writeMessageBegin('get_type_all', TMessageType.CALL, self._seqid)
+     args = get_type_all_args()
+     args.name = name
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_type_all(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_type_all_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o2 is not None:
+       raise result.o2
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_type_all failed: unknown result")
+ 
+   def get_fields(self, db_name, table_name):
+     """
+     Parameters:
+      - db_name
+      - table_name
+     """
+     self.send_get_fields(db_name, table_name)
+     return self.recv_get_fields()
+ 
+   def send_get_fields(self, db_name, table_name):
+     self._oprot.writeMessageBegin('get_fields', TMessageType.CALL, self._seqid)
+     args = get_fields_args()
+     args.db_name = db_name
+     args.table_name = table_name
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_fields(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_fields_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_fields failed: unknown result")
+ 
+   def get_fields_with_environment_context(self, db_name, table_name, environment_context):
+     """
+     Parameters:
+      - db_name
+      - table_name
+      - environment_context
+     """
+     self.send_get_fields_with_environment_context(db_name, table_name, environment_context)
+     return self.recv_get_fields_with_environment_context()
+ 
+   def send_get_fields_with_environment_context(self, db_name, table_name, environment_context):
+     self._oprot.writeMessageBegin('get_fields_with_environment_context', TMessageType.CALL, self._seqid)
+     args = get_fields_with_environment_context_args()
+     args.db_name = db_name
+     args.table_name = table_name
+     args.environment_context = environment_context
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_fields_with_environment_context(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_fields_with_environment_context_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_fields_with_environment_context failed: unknown result")
+ 
+   def get_schema(self, db_name, table_name):
+     """
+     Parameters:
+      - db_name
+      - table_name
+     """
+     self.send_get_schema(db_name, table_name)
+     return self.recv_get_schema()
+ 
+   def send_get_schema(self, db_name, table_name):
+     self._oprot.writeMessageBegin('get_schema', TMessageType.CALL, self._seqid)
+     args = get_schema_args()
+     args.db_name = db_name
+     args.table_name = table_name
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_schema(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_schema_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema failed: unknown result")
+ 
+   def get_schema_with_environment_context(self, db_name, table_name, environment_context):
+     """
+     Parameters:
+      - db_name
+      - table_name
+      - environment_context
+     """
+     self.send_get_schema_with_environment_context(db_name, table_name, environment_context)
+     return self.recv_get_schema_with_environment_context()
+ 
+   def send_get_schema_with_environment_context(self, db_name, table_name, environment_context):
+     self._oprot.writeMessageBegin('get_schema_with_environment_context', TMessageType.CALL, self._seqid)
+     args = get_schema_with_environment_context_args()
+     args.db_name = db_name
+     args.table_name = table_name
+     args.environment_context = environment_context
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_schema_with_environment_context(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_schema_with_environment_context_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_schema_with_environment_context failed: unknown result")
+ 
+   def create_table(self, tbl):
+     """
+     Parameters:
+      - tbl
+     """
+     self.send_create_table(tbl)
+     self.recv_create_table()
+ 
+   def send_create_table(self, tbl):
+     self._oprot.writeMessageBegin('create_table', TMessageType.CALL, self._seqid)
+     args = create_table_args()
+     args.tbl = tbl
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_create_table(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = create_table_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     if result.o4 is not None:
+       raise result.o4
+     return
+ 
+   def create_table_with_environment_context(self, tbl, environment_context):
+     """
+     Parameters:
+      - tbl
+      - environment_context
+     """
+     self.send_create_table_with_environment_context(tbl, environment_context)
+     self.recv_create_table_with_environment_context()
+ 
+   def send_create_table_with_environment_context(self, tbl, environment_context):
+     self._oprot.writeMessageBegin('create_table_with_environment_context', TMessageType.CALL, self._seqid)
+     args = create_table_with_environment_context_args()
+     args.tbl = tbl
+     args.environment_context = environment_context
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_create_table_with_environment_context(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = create_table_with_environment_context_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     if result.o4 is not None:
+       raise result.o4
+     return
+ 
+   def create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints):
+     """
+     Parameters:
+      - tbl
+      - primaryKeys
+      - foreignKeys
+      - uniqueConstraints
+      - notNullConstraints
+      - defaultConstraints
+      - checkConstraints
+     """
+     self.send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints)
+     self.recv_create_table_with_constraints()
+ 
+   def send_create_table_with_constraints(self, tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints):
+     self._oprot.writeMessageBegin('create_table_with_constraints', TMessageType.CALL, self._seqid)
+     args = create_table_with_constraints_args()
+     args.tbl = tbl
+     args.primaryKeys = primaryKeys
+     args.foreignKeys = foreignKeys
+     args.uniqueConstraints = uniqueConstraints
+     args.notNullConstraints = notNullConstraints
+     args.defaultConstraints = defaultConstraints
+     args.checkConstraints = checkConstraints
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_create_table_with_constraints(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = create_table_with_constraints_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     if result.o4 is not None:
+       raise result.o4
+     return
+ 
+   def drop_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     self.send_drop_constraint(req)
+     self.recv_drop_constraint()
+ 
+   def send_drop_constraint(self, req):
+     self._oprot.writeMessageBegin('drop_constraint', TMessageType.CALL, self._seqid)
+     args = drop_constraint_args()
+     args.req = req
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_drop_constraint(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = drop_constraint_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o3 is not None:
+       raise result.o3
+     return
+ 
+   def add_primary_key(self, req):
+     """
+     Parameters:
+      - req
+     """
+     self.send_add_primary_key(req)
+     self.recv_add_primary_key()
+ 
+   def send_add_primary_key(self, req):
+     self._oprot.writeMessageBegin('add_primary_key', TMessageType.CALL, self._seqid)
+     args = add_primary_key_args()
+     args.req = req
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_add_primary_key(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = add_primary_key_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def add_foreign_key(self, req):
+     """
+     Parameters:
+      - req
+     """
+     self.send_add_foreign_key(req)
+     self.recv_add_foreign_key()
+ 
+   def send_add_foreign_key(self, req):
+     self._oprot.writeMessageBegin('add_foreign_key', TMessageType.CALL, self._seqid)
+     args = add_foreign_key_args()
+     args.req = req
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_add_foreign_key(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = add_foreign_key_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def add_unique_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     self.send_add_unique_constraint(req)
+     self.recv_add_unique_constraint()
+ 
+   def send_add_unique_constraint(self, req):
+     self._oprot.writeMessageBegin('add_unique_constraint', TMessageType.CALL, self._seqid)
+     args = add_unique_constraint_args()
+     args.req = req
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_add_unique_constraint(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = add_unique_constraint_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def add_not_null_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     self.send_add_not_null_constraint(req)
+     self.recv_add_not_null_constraint()
+ 
+   def send_add_not_null_constraint(self, req):
+     self._oprot.writeMessageBegin('add_not_null_constraint', TMessageType.CALL, self._seqid)
+     args = add_not_null_constraint_args()
+     args.req = req
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_add_not_null_constraint(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = add_not_null_constraint_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def add_default_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     self.send_add_default_constraint(req)
+     self.recv_add_default_constraint()
+ 
+   def send_add_default_constraint(self, req):
+     self._oprot.writeMessageBegin('add_default_constraint', TMessageType.CALL, self._seqid)
+     args = add_default_constraint_args()
+     args.req = req
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_add_default_constraint(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = add_default_constraint_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def add_check_constraint(self, req):
+     """
+     Parameters:
+      - req
+     """
+     self.send_add_check_constraint(req)
+     self.recv_add_check_constraint()
+ 
+   def send_add_check_constraint(self, req):
+     self._oprot.writeMessageBegin('add_check_constraint', TMessageType.CALL, self._seqid)
+     args = add_check_constraint_args()
+     args.req = req
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_add_check_constraint(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = add_check_constraint_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def drop_table(self, dbname, name, deleteData):
+     """
+     Parameters:
+      - dbname
+      - name
+      - deleteData
+     """
+     self.send_drop_table(dbname, name, deleteData)
+     self.recv_drop_table()
+ 
+   def send_drop_table(self, dbname, name, deleteData):
+     self._oprot.writeMessageBegin('drop_table', TMessageType.CALL, self._seqid)
+     args = drop_table_args()
+     args.dbname = dbname
+     args.name = name
+     args.deleteData = deleteData
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_drop_table(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = drop_table_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o3 is not None:
+       raise result.o3
+     return
+ 
+   def drop_table_with_environment_context(self, dbname, name, deleteData, environment_context):
+     """
+     Parameters:
+      - dbname
+      - name
+      - deleteData
+      - environment_context
+     """
+     self.send_drop_table_with_environment_context(dbname, name, deleteData, environment_context)
+     self.recv_drop_table_with_environment_context()
+ 
+   def send_drop_table_with_environment_context(self, dbname, name, deleteData, environment_context):
+     self._oprot.writeMessageBegin('drop_table_with_environment_context', TMessageType.CALL, self._seqid)
+     args = drop_table_with_environment_context_args()
+     args.dbname = dbname
+     args.name = name
+     args.deleteData = deleteData
+     args.environment_context = environment_context
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_drop_table_with_environment_context(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = drop_table_with_environment_context_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o3 is not None:
+       raise result.o3
+     return
+ 
+   def truncate_table(self, dbName, tableName, partNames):
+     """
+     Parameters:
+      - dbName
+      - tableName
+      - partNames
+     """
+     self.send_truncate_table(dbName, tableName, partNames)
+     self.recv_truncate_table()
+ 
+   def send_truncate_table(self, dbName, tableName, partNames):
+     self._oprot.writeMessageBegin('truncate_table', TMessageType.CALL, self._seqid)
+     args = truncate_table_args()
+     args.dbName = dbName
+     args.tableName = tableName
+     args.partNames = partNames
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_truncate_table(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = truncate_table_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     return
+ 
+   def get_tables(self, db_name, pattern):
+     """
+     Parameters:
+      - db_name
+      - pattern
+     """
+     self.send_get_tables(db_name, pattern)
+     return self.recv_get_tables()
+ 
+   def send_get_tables(self, db_name, pattern):
+     self._oprot.writeMessageBegin('get_tables', TMessageType.CALL, self._seqid)
+     args = get_tables_args()
+     args.db_name = db_name
+     args.pattern = pattern
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_tables(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_tables_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_tables failed: unknown result")
+ 
+   def get_tables_by_type(self, db_name, pattern, tableType):
+     """
+     Parameters:
+      - db_name
+      - pattern
+      - tableType
+     """
+     self.send_get_tables_by_type(db_name, pattern, tableType)
+     return self.recv_get_tables_by_type()
+ 
+   def send_get_tables_by_type(self, db_name, pattern, tableType):
+     self._oprot.writeMessageBegin('get_tables_by_type', TMessageType.CALL, self._seqid)
+     args = get_tables_by_type_args()
+     args.db_name = db_name
+     args.pattern = pattern
+     args.tableType = tableType
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_tables_by_type(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_tables_by_type_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_tables_by_type failed: unknown result")
+ 
+   def get_materialized_views_for_rewriting(self, db_name):
+     """
+     Parameters:
+      - db_name
+     """
+     self.send_get_materialized_views_for_rewriting(db_name)
+     return self.recv_get_materialized_views_for_rewriting()
+ 
+   def send_get_materialized_views_for_rewriting(self, db_name):
+     self._oprot.writeMessageBegin('get_materialized_views_for_rewriting', TMessageType.CALL, self._seqid)
+     args = get_materialized_views_for_rewriting_args()
+     args.db_name = db_name
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_materialized_views_for_rewriting(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_materialized_views_for_rewriting_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_materialized_views_for_rewriting failed: unknown result")
+ 
+   def get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
+     """
+     Parameters:
+      - db_patterns
+      - tbl_patterns
+      - tbl_types
+     """
+     self.send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
+     return self.recv_get_table_meta()
+ 
+   def send_get_table_meta(self, db_patterns, tbl_patterns, tbl_types):
+     self._oprot.writeMessageBegin('get_table_meta', TMessageType.CALL, self._seqid)
+     args = get_table_meta_args()
+     args.db_patterns = db_patterns
+     args.tbl_patterns = tbl_patterns
+     args.tbl_types = tbl_types
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_table_meta(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_table_meta_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_meta failed: unknown result")
+ 
+   def get_all_tables(self, db_name):
+     """
+     Parameters:
+      - db_name
+     """
+     self.send_get_all_tables(db_name)
+     return self.recv_get_all_tables()
+ 
+   def send_get_all_tables(self, db_name):
+     self._oprot.writeMessageBegin('get_all_tables', TMessageType.CALL, self._seqid)
+     args = get_all_tables_args()
+     args.db_name = db_name
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_all_tables(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_all_tables_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_all_tables failed: unknown result")
+ 
+   def get_table(self, dbname, tbl_name):
+     """
+     Parameters:
+      - dbname
+      - tbl_name
+     """
+     self.send_get_table(dbname, tbl_name)
+     return self.recv_get_table()
+ 
+   def send_get_table(self, dbname, tbl_name):
+     self._oprot.writeMessageBegin('get_table', TMessageType.CALL, self._seqid)
+     args = get_table_args()
+     args.dbname = dbname
+     args.tbl_name = tbl_name
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_table(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_table_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table failed: unknown result")
+ 
+   def get_table_objects_by_name(self, dbname, tbl_names):
+     """
+     Parameters:
+      - dbname
+      - tbl_names
+     """
+     self.send_get_table_objects_by_name(dbname, tbl_names)
+     return self.recv_get_table_objects_by_name()
+ 
+   def send_get_table_objects_by_name(self, dbname, tbl_names):
+     self._oprot.writeMessageBegin('get_table_objects_by_name', TMessageType.CALL, self._seqid)
+     args = get_table_objects_by_name_args()
+     args.dbname = dbname
+     args.tbl_names = tbl_names
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_table_objects_by_name(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_table_objects_by_name_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_objects_by_name failed: unknown result")
+ 
+   def get_table_req(self, req):
+     """
+     Parameters:
+      - req
+     """
+     self.send_get_table_req(req)
+     return self.recv_get_table_req()
+ 
+   def send_get_table_req(self, req):
+     self._oprot.writeMessageBegin('get_table_req', TMessageType.CALL, self._seqid)
+     args = get_table_req_args()
+     args.req = req
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_table_req(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_table_req_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_req failed: unknown result")
+ 
+   def get_table_objects_by_name_req(self, req):
+     """
+     Parameters:
+      - req
+     """
+     self.send_get_table_objects_by_name_req(req)
+     return self.recv_get_table_objects_by_name_req()
+ 
+   def send_get_table_objects_by_name_req(self, req):
+     self._oprot.writeMessageBegin('get_table_objects_by_name_req', TMessageType.CALL, self._seqid)
+     args = get_table_objects_by_name_req_args()
+     args.req = req
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_table_objects_by_name_req(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_table_objects_by_name_req_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_objects_by_name_req failed: unknown result")
+ 
+   def get_materialization_invalidation_info(self, dbname, tbl_names):
+     """
+     Parameters:
+      - dbname
+      - tbl_names
+     """
+     self.send_get_materialization_invalidation_info(dbname, tbl_names)
+     return self.recv_get_materialization_invalidation_info()
+ 
+   def send_get_materialization_invalidation_info(self, dbname, tbl_names):
+     self._oprot.writeMessageBegin('get_materialization_invalidation_info', TMessageType.CALL, self._seqid)
+     args = get_materialization_invalidation_info_args()
+     args.dbname = dbname
+     args.tbl_names = tbl_names
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_materialization_invalidation_info(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_materialization_invalidation_info_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_materialization_invalidation_info failed: unknown result")
+ 
+   def update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata):
+     """
+     Parameters:
+      - catName
+      - dbname
+      - tbl_name
+      - creation_metadata
+     """
+     self.send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata)
+     self.recv_update_creation_metadata()
+ 
+   def send_update_creation_metadata(self, catName, dbname, tbl_name, creation_metadata):
+     self._oprot.writeMessageBegin('update_creation_metadata', TMessageType.CALL, self._seqid)
+     args = update_creation_metadata_args()
+     args.catName = catName
+     args.dbname = dbname
+     args.tbl_name = tbl_name
+     args.creation_metadata = creation_metadata
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_update_creation_metadata(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = update_creation_metadata_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     return
+ 
+   def get_table_names_by_filter(self, dbname, filter, max_tables):
+     """
+     Parameters:
+      - dbname
+      - filter
+      - max_tables
+     """
+     self.send_get_table_names_by_filter(dbname, filter, max_tables)
+     return self.recv_get_table_names_by_filter()
+ 
+   def send_get_table_names_by_filter(self, dbname, filter, max_tables):
+     self._oprot.writeMessageBegin('get_table_names_by_filter', TMessageType.CALL, self._seqid)
+     args = get_table_names_by_filter_args()
+     args.dbname = dbname
+     args.filter = filter
+     args.max_tables = max_tables
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_get_table_names_by_filter(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = get_table_names_by_filter_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "get_table_names_by_filter failed: unknown result")
+ 
+   def alter_table(self, dbname, tbl_name, new_tbl):
+     """
+     Parameters:
+      - dbname
+      - tbl_name
+      - new_tbl
+     """
+     self.send_alter_table(dbname, tbl_name, new_tbl)
+     self.recv_alter_table()
+ 
+   def send_alter_table(self, dbname, tbl_name, new_tbl):
+     self._oprot.writeMessageBegin('alter_table', TMessageType.CALL, self._seqid)
+     args = alter_table_args()
+     args.dbname = dbname
+     args.tbl_name = tbl_name
+     args.new_tbl = new_tbl
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_alter_table(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = alter_table_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context):
+     """
+     Parameters:
+      - dbname
+      - tbl_name
+      - new_tbl
+      - environment_context
+     """
+     self.send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context)
+     self.recv_alter_table_with_environment_context()
+ 
+   def send_alter_table_with_environment_context(self, dbname, tbl_name, new_tbl, environment_context):
+     self._oprot.writeMessageBegin('alter_table_with_environment_context', TMessageType.CALL, self._seqid)
+     args = alter_table_with_environment_context_args()
+     args.dbname = dbname
+     args.tbl_name = tbl_name
+     args.new_tbl = new_tbl
+     args.environment_context = environment_context
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_alter_table_with_environment_context(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = alter_table_with_environment_context_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade):
+     """
+     Parameters:
+      - dbname
+      - tbl_name
+      - new_tbl
+      - cascade
+     """
+     self.send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade)
+     self.recv_alter_table_with_cascade()
+ 
+   def send_alter_table_with_cascade(self, dbname, tbl_name, new_tbl, cascade):
+     self._oprot.writeMessageBegin('alter_table_with_cascade', TMessageType.CALL, self._seqid)
+     args = alter_table_with_cascade_args()
+     args.dbname = dbname
+     args.tbl_name = tbl_name
+     args.new_tbl = new_tbl
+     args.cascade = cascade
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_alter_table_with_cascade(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = alter_table_with_cascade_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     return
+ 
+   def add_partition(self, new_part):
+     """
+     Parameters:
+      - new_part
+     """
+     self.send_add_partition(new_part)
+     return self.recv_add_partition()
+ 
+   def send_add_partition(self, new_part):
+     self._oprot.writeMessageBegin('add_partition', TMessageType.CALL, self._seqid)
+     args = add_partition_args()
+     args.new_part = new_part
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_add_partition(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = add_partition_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partition failed: unknown result")
+ 
+   def add_partition_with_environment_context(self, new_part, environment_context):
+     """
+     Parameters:
+      - new_part
+      - environment_context
+     """
+     self.send_add_partition_with_environment_context(new_part, environment_context)
+     return self.recv_add_partition_with_environment_context()
+ 
+   def send_add_partition_with_environment_context(self, new_part, environment_context):
+     self._oprot.writeMessageBegin('add_partition_with_environment_context', TMessageType.CALL, self._seqid)
+     args = add_partition_with_environment_context_args()
+     args.new_part = new_part
+     args.environment_context = environment_context
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()
+ 
+   def recv_add_partition_with_environment_context(self):
+     iprot = self._iprot
+     (fname, mtype, rseqid) = iprot.readMessageBegin()
+     if mtype == TMessageType.EXCEPTION:
+       x = TApplicationException()
+       x.read(iprot)
+       iprot.readMessageEnd()
+       raise x
+     result = add_partition_with_environment_context_result()
+     result.read(iprot)
+     iprot.readMessageEnd()
+     if result.success is not None:
+       return result.success
+     if result.o1 is not None:
+       raise result.o1
+     if result.o2 is not None:
+       raise result.o2
+     if result.o3 is not None:
+       raise result.o3
+     raise TApplicationException(TApplicationException.MISSING_RESULT, "add_partition_with_environment_context failed: unknown result")
+ 
+   def add_partitions(self, new_parts):
+     """
+     Parameters:
+      - new_parts
+     """
+     self.send_add_partitions(new_parts)
+     return self.recv_add_partitions()
+ 
+   def send_add_partitions(self, new_parts):
+     self._oprot.writeMessageBegin('add_partitions', TMessageType.CALL, self._seqid)
+     args = add_partitions_args()
+     args.new_parts = new_parts
+     args.write(self._oprot)
+     self._oprot.writeMessageEnd()
+     self._oprot.trans.flush()

<TRUNCATED>
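
Every generated client method above follows the same synchronous shape: send_<name> serializes the args struct and flushes the transport, and recv_<name> either re-raises a TApplicationException, raises one of the declared exception fields (o1, o2, o3), or returns the result. A minimal sketch of driving one of these calls through the generated Java client (the host, port, and filter values are assumptions for illustration, not part of this commit):

import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;

public class TableFilterSketch {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint; real deployments take this from the metastore URIs config.
    TSocket transport = new TSocket("localhost", 9083);
    transport.open();
    ThriftHiveMetastore.Client client =
        new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
    // Under the hood this is send_get_table_names_by_filter(...) followed by
    // recv_get_table_names_by_filter(), mirroring the Python stubs above.
    for (String name : client.get_table_names_by_filter("default", "", (short) -1)) {
      System.out.println(name);
    }
    transport.close();
  }
}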

[54/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
index 0000000,abbcda3..c5977b2
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/InjectableBehaviourObjectStore.java
@@@ -1,0 -1,211 +1,218 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.List;
++
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ 
+ import static org.junit.Assert.assertEquals;
+ 
+ 
+ /**
+  * A wrapper around {@link ObjectStore} that allows us to inject custom behaviour
+  * on to some of the methods for testing.
+  */
+ public class InjectableBehaviourObjectStore extends ObjectStore {
+   public InjectableBehaviourObjectStore() {
+     super();
+   }
+ 
+   /**
+    * A utility class that lets whoever injects behaviour verify that the injection was actually exercised.
+    */
+   public static abstract class BehaviourInjection<T, F>
+       implements com.google.common.base.Function<T, F>{
+     protected boolean injectionPathCalled = false;
+     protected boolean nonInjectedPathCalled = false;
+ 
+     public void assertInjectionsPerformed(
+         boolean expectedInjectionCalled, boolean expectedNonInjectedPathCalled){
+       assertEquals(expectedInjectionCalled, injectionPathCalled);
+       assertEquals(expectedNonInjectedPathCalled, nonInjectedPathCalled);
+     }
+   }
+ 
+   /**
+    * A utility class to pass the arguments of the caller to the stub method.
+    */
+   public class CallerArguments {
+     public String dbName;
+     public String tblName;
+     public String funcName;
+     public String constraintTblName;
+ 
+     public CallerArguments(String dbName) {
+       this.dbName = dbName;
+     }
+   }
+ 
+   private static com.google.common.base.Function<Table, Table> getTableModifier =
+       com.google.common.base.Functions.identity();
+   private static com.google.common.base.Function<Partition, Partition> getPartitionModifier =
+           com.google.common.base.Functions.identity();
+   private static com.google.common.base.Function<List<String>, List<String>> listPartitionNamesModifier =
+           com.google.common.base.Functions.identity();
+   private static com.google.common.base.Function<NotificationEventResponse, NotificationEventResponse>
+           getNextNotificationModifier = com.google.common.base.Functions.identity();
+ 
+   private static com.google.common.base.Function<CallerArguments, Boolean> callerVerifier = null;
+ 
+   // Methods to set/reset getTable modifier
+   public static void setGetTableBehaviour(com.google.common.base.Function<Table, Table> modifier){
+     getTableModifier = (modifier == null) ? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetGetTableBehaviour(){
+     setGetTableBehaviour(null);
+   }
+ 
+   // Methods to set/reset getPartition modifier
+   public static void setGetPartitionBehaviour(com.google.common.base.Function<Partition, Partition> modifier){
+     getPartitionModifier = (modifier == null) ? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetGetPartitionBehaviour(){
+     setGetPartitionBehaviour(null);
+   }
+ 
+   // Methods to set/reset listPartitionNames modifier
+   public static void setListPartitionNamesBehaviour(com.google.common.base.Function<List<String>, List<String>> modifier){
+     listPartitionNamesModifier = (modifier == null)? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetListPartitionNamesBehaviour(){
+     setListPartitionNamesBehaviour(null);
+   }
+ 
+   // Methods to set/reset getNextNotification modifier
+   public static void setGetNextNotificationBehaviour(
+           com.google.common.base.Function<NotificationEventResponse,NotificationEventResponse> modifier){
+     getNextNotificationModifier = (modifier == null)? com.google.common.base.Functions.identity() : modifier;
+   }
+ 
+   public static void resetGetNextNotificationBehaviour(){
+     setGetNextNotificationBehaviour(null);
+   }
+ 
+   // Methods to set/reset caller checker
+   public static void setCallerVerifier(com.google.common.base.Function<CallerArguments, Boolean> verifier){
+     callerVerifier = verifier;
+   }
+ 
+   public static void resetCallerVerifier(){
+     setCallerVerifier(null);
+   }
+ 
+   // ObjectStore methods to be overridden with injected behavior
+   @Override
+   public Table getTable(String catName, String dbName, String tableName) throws MetaException {
+     return getTableModifier.apply(super.getTable(catName, dbName, tableName));
+   }
+ 
+   @Override
++  public Table getTable(String catName, String dbName, String tableName,
++      long txnId, String writeIdList) throws MetaException {
++    return getTableModifier.apply(super.getTable(catName, dbName, tableName, txnId, writeIdList));
++  }
++
++  @Override
+   public Partition getPartition(String catName, String dbName, String tableName,
+                                 List<String> partVals) throws NoSuchObjectException, MetaException {
+     return getPartitionModifier.apply(super.getPartition(catName, dbName, tableName, partVals));
+   }
+ 
+   @Override
+   public List<String> listPartitionNames(String catName, String dbName, String tableName, short max)
+           throws MetaException {
+     return listPartitionNamesModifier.apply(super.listPartitionNames(catName, dbName, tableName, max));
+   }
+ 
+   @Override
+   public NotificationEventResponse getNextNotification(NotificationEventRequest rqst) {
+     return getNextNotificationModifier.apply(super.getNextNotification(rqst));
+   }
+ 
+   @Override
+   public void createTable(Table tbl) throws InvalidObjectException, MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(tbl.getDbName());
+       args.tblName = tbl.getTableName();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Create Table operation on DB: "
+                 + args.dbName + " table: " + args.tblName);
+       }
+     }
+     super.createTable(tbl);
+   }
+ 
+   @Override
+   public void createFunction(Function func) throws InvalidObjectException, MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(func.getDbName());
+       args.funcName = func.getFunctionName();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Create Function operation on DB: "
+                 + args.dbName + " function: " + args.funcName);
+       }
+     }
+     super.createFunction(func);
+   }
+ 
+   @Override
+   public List<String> addPrimaryKeys(List<SQLPrimaryKey> pks) throws InvalidObjectException,
+           MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(pks.get(0).getTable_db());
+       args.constraintTblName = pks.get(0).getTable_name();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Add Primary Key operation on DB: "
+                 + args.dbName + " table: " + args.constraintTblName);
+       }
+     }
+     return super.addPrimaryKeys(pks);
+   }
+ 
+   @Override
+   public List<String> addForeignKeys(List<SQLForeignKey> fks) throws InvalidObjectException,
+           MetaException {
+     if (callerVerifier != null) {
+       CallerArguments args = new CallerArguments(fks.get(0).getFktable_db());
+       args.constraintTblName = fks.get(0).getFktable_name();
+       Boolean success = callerVerifier.apply(args);
+       if ((success != null) && !success) {
+         throw new MetaException("InjectableBehaviourObjectStore: Invalid Add Foreign Key operation on DB: "
+                 + args.dbName + " table: " + args.constraintTblName);
+       }
+     }
+     return super.addForeignKeys(fks);
+   }
+ }
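
The modifier hooks above are static, so a test swaps behaviour in before driving a metastore operation and must reset it afterwards, otherwise the injection leaks into later tests. A hedged sketch of the intended usage, assuming a table named "t1" and whatever metastore operation the test actually exercises:

import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore;
import org.apache.hadoop.hive.metastore.InjectableBehaviourObjectStore.BehaviourInjection;
import org.apache.hadoop.hive.metastore.api.Table;

public class InjectionUsageSketch {
  public static void simulateMissingTable() {
    BehaviourInjection<Table, Table> hideTable = new BehaviourInjection<Table, Table>() {
      @Override
      public Table apply(Table table) {
        if (table != null && "t1".equalsIgnoreCase(table.getTableName())) {
          injectionPathCalled = true;
          return null; // pretend the table vanished mid-operation
        }
        nonInjectedPathCalled = true;
        return table;
      }
    };
    InjectableBehaviourObjectStore.setGetTableBehaviour(hideTable);
    try {
      // ... run the metastore operation under test here ...
    } finally {
      InjectableBehaviourObjectStore.resetGetTableBehaviour(); // always restore
    }
    hideTable.assertInjectionsPerformed(true, false);
  }
}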

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
index 0000000,adc82b0..d9dd954
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestHiveAlterHandler.java
@@@ -1,0 -1,121 +1,121 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ 
+ package org.apache.hadoop.hive.metastore;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.mockito.Mockito;
+ 
+ import java.util.Arrays;
+ 
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ 
+ @Category(MetastoreUnitTest.class)
+ public class TestHiveAlterHandler {
+ 
+   private Configuration conf = MetastoreConf.newMetastoreConf();
+ 
+   @Test
+   public void testAlterTableAddColNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
+     FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+     FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+     FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+     FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
+ 
+     StorageDescriptor oldSd = new StorageDescriptor();
+     oldSd.setCols(Arrays.asList(col1, col2, col3));
+     Table oldTable = new Table();
+     oldTable.setDbName("default");
+     oldTable.setTableName("test_table");
+     oldTable.setSd(oldSd);
+ 
+     StorageDescriptor newSd = new StorageDescriptor(oldSd);
+     newSd.setCols(Arrays.asList(col1, col2, col3, col4));
+     Table newTable = new Table(oldTable);
+     newTable.setSd(newSd);
+ 
+     RawStore msdb = Mockito.mock(RawStore.class);
+     Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
+         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3"));
+     HiveAlterHandler handler = new HiveAlterHandler();
+     handler.setConf(conf);
 -    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
++    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null);
+   }
+ 
+   @Test
+   public void testAlterTableDelColUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
+     FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+     FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+     FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+     FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
+ 
+     StorageDescriptor oldSd = new StorageDescriptor();
+     oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
+     Table oldTable = new Table();
+     oldTable.setDbName("default");
+     oldTable.setTableName("test_table");
+     oldTable.setSd(oldSd);
+ 
+     StorageDescriptor newSd = new StorageDescriptor(oldSd);
+     newSd.setCols(Arrays.asList(col1, col2, col3));
+     Table newTable = new Table(oldTable);
+     newTable.setSd(newSd);
+ 
+     RawStore msdb = Mockito.mock(RawStore.class);
+     HiveAlterHandler handler = new HiveAlterHandler();
+     handler.setConf(conf);
 -    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
++    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null);
+     Mockito.verify(msdb, Mockito.times(1)).getTableColumnStatistics(
+         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4")
+     );
+   }
+ 
+   @Test
+   public void testAlterTableChangePosNotUpdateStats() throws MetaException, InvalidObjectException, NoSuchObjectException {
+     FieldSchema col1 = new FieldSchema("col1", "string", "col1 comment");
+     FieldSchema col2 = new FieldSchema("col2", "string", "col2 comment");
+     FieldSchema col3 = new FieldSchema("col3", "string", "col3 comment");
+     FieldSchema col4 = new FieldSchema("col4", "string", "col4 comment");
+ 
+     StorageDescriptor oldSd = new StorageDescriptor();
+     oldSd.setCols(Arrays.asList(col1, col2, col3, col4));
+     Table oldTable = new Table();
+     oldTable.setDbName("default");
+     oldTable.setTableName("test_table");
+     oldTable.setSd(oldSd);
+ 
+     StorageDescriptor newSd = new StorageDescriptor(oldSd);
+     newSd.setCols(Arrays.asList(col1, col4, col2, col3));
+     Table newTable = new Table(oldTable);
+     newTable.setSd(newSd);
+ 
+     RawStore msdb = Mockito.mock(RawStore.class);
+     Mockito.doThrow(new RuntimeException("shouldn't be called")).when(msdb).getTableColumnStatistics(
+         getDefaultCatalog(conf), oldTable.getDbName(), oldTable.getTableName(), Arrays.asList("col1", "col2", "col3", "col4"));
+     HiveAlterHandler handler = new HiveAlterHandler();
+     handler.setConf(conf);
 -    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable);
++    handler.alterTableUpdateTableColumnStats(msdb, oldTable, newTable, null);
+   }
+ 
+ }
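
Note the conflict markers above: the merge threads one extra argument (null here, presumably the new write-id list introduced on this branch) through alterTableUpdateTableColumnStats. These tests also lean on a Mockito idiom worth spelling out: to assert that a collaborator is never invoked, stub the call to throw. A self-contained sketch of that pattern (the Store interface is hypothetical):

import java.util.List;
import org.mockito.Mockito;

public class FailIfCalledSketch {
  interface Store { List<String> getStats(String table); }

  public static void main(String[] args) {
    Store store = Mockito.mock(Store.class);
    // Any call to getStats("t") now fails the test immediately.
    Mockito.doThrow(new RuntimeException("shouldn't be called"))
           .when(store).getStats("t");
    // ... code under test that must not read column stats would run here ...
    Mockito.verify(store, Mockito.never()).getStats("t");
  }
}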

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index 0000000,833e2bd..c40d45d
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ b/standalone-metastore/metastore-common/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@@ -1,0 -1,904 +1,904 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import com.codahale.metrics.Counter;
+ import com.google.common.base.Supplier;
+ import com.google.common.collect.ImmutableList;
+ import org.apache.hadoop.hive.metastore.ObjectStore.RetryingExecutor;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.annotation.MetastoreUnitTest;
+ import org.apache.hadoop.hive.metastore.api.BooleanColumnStatsData;
+ import org.apache.hadoop.hive.metastore.api.Catalog;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.Function;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
+ import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.NotificationEvent;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventRequest;
+ import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.PrincipalType;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
+ import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
+ import org.apache.hadoop.hive.metastore.api.Role;
+ import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+ import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
+ import org.apache.hadoop.hive.metastore.api.SerDeInfo;
+ import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.client.builder.CatalogBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PartitionBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.HiveObjectPrivilegeBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.HiveObjectRefBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.PrivilegeGrantInfoBuilder;
+ import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.metrics.Metrics;
+ import org.apache.hadoop.hive.metastore.metrics.MetricsConstants;
+ import org.apache.hadoop.hive.metastore.model.MNotificationLog;
+ import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
+ import org.junit.Assert;
+ import org.junit.Assume;
+ import org.junit.Before;
+ import org.junit.Ignore;
+ import org.junit.Test;
+ import org.junit.experimental.categories.Category;
+ import org.mockito.Mockito;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import javax.jdo.Query;
+ import java.sql.Connection;
+ import java.sql.DriverManager;
+ import java.sql.ResultSet;
+ import java.sql.SQLException;
+ import java.sql.Statement;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.HashMap;
+ import java.util.HashSet;
+ import java.util.List;
+ import java.util.Set;
+ import java.util.concurrent.BrokenBarrierException;
+ import java.util.concurrent.CyclicBarrier;
+ import java.util.concurrent.ExecutionException;
+ import java.util.concurrent.ExecutorService;
+ import java.util.concurrent.Executors;
+ import java.util.concurrent.TimeUnit;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ 
+ @Category(MetastoreUnitTest.class)
+ public class TestObjectStore {
+   private ObjectStore objectStore = null;
+   private Configuration conf;
+ 
+   private static final String DB1 = "testobjectstoredb1";
+   private static final String DB2 = "testobjectstoredb2";
+   private static final String TABLE1 = "testobjectstoretable1";
+   private static final String KEY1 = "testobjectstorekey1";
+   private static final String KEY2 = "testobjectstorekey2";
+   private static final String OWNER = "testobjectstoreowner";
+   private static final String USER1 = "testobjectstoreuser1";
+   private static final String ROLE1 = "testobjectstorerole1";
+   private static final String ROLE2 = "testobjectstorerole2";
+   private static final Logger LOG = LoggerFactory.getLogger(TestObjectStore.class.getName());
+ 
+   private static final class LongSupplier implements Supplier<Long> {
+     public long value = 0;
+ 
+     @Override
+     public Long get() {
+       return value;
+     }
+   }
+ 
+   @Before
+   public void setUp() throws Exception {
+     conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.HIVE_IN_TEST, true);
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+ 
+     objectStore = new ObjectStore();
+     objectStore.setConf(conf);
+     dropAllStoreObjects(objectStore);
+     HiveMetaStore.HMSHandler.createDefaultCatalog(objectStore, new Warehouse(conf));
+   }
+ 
+   @Test
+   public void catalogs() throws MetaException, NoSuchObjectException {
+     final String names[] = {"cat1", "cat2"};
+     final String locations[] = {"loc1", "loc2"};
+     final String descriptions[] = {"description 1", "description 2"};
+ 
+     for (int i = 0; i < names.length; i++) {
+       Catalog cat = new CatalogBuilder()
+           .setName(names[i])
+           .setLocation(locations[i])
+           .setDescription(descriptions[i])
+           .build();
+       objectStore.createCatalog(cat);
+     }
+ 
+     List<String> fetchedNames = objectStore.getCatalogs();
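+     // Expect the two catalogs created above plus the default catalog from setUp().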
+     Assert.assertEquals(3, fetchedNames.size());
+     for (int i = 0; i < names.length - 1; i++) {
+       Assert.assertEquals(names[i], fetchedNames.get(i));
+       Catalog cat = objectStore.getCatalog(fetchedNames.get(i));
+       Assert.assertEquals(names[i], cat.getName());
+       Assert.assertEquals(descriptions[i], cat.getDescription());
+       Assert.assertEquals(locations[i], cat.getLocationUri());
+     }
+     Catalog cat = objectStore.getCatalog(fetchedNames.get(2));
+     Assert.assertEquals(DEFAULT_CATALOG_NAME, cat.getName());
+     Assert.assertEquals(Warehouse.DEFAULT_CATALOG_COMMENT, cat.getDescription());
+     // Location will vary by system.
+ 
+     for (int i = 0; i < names.length; i++) objectStore.dropCatalog(names[i]);
+     fetchedNames = objectStore.getCatalogs();
+     Assert.assertEquals(1, fetchedNames.size());
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void getNoSuchCatalog() throws MetaException, NoSuchObjectException {
+     objectStore.getCatalog("no_such_catalog");
+   }
+ 
+   @Test(expected = NoSuchObjectException.class)
+   public void dropNoSuchCatalog() throws MetaException, NoSuchObjectException {
+     objectStore.dropCatalog("no_such_catalog");
+   }
+ 
+   // TODO test dropping non-empty catalog
+ 
+   /**
+    * Test database operations
+    */
+   @Test
+   public void testDatabaseOps() throws MetaException, InvalidObjectException,
+       NoSuchObjectException {
+     String catName = "tdo1_cat";
+     createTestCatalog(catName);
+     Database db1 = new Database(DB1, "description", "locationurl", null);
+     Database db2 = new Database(DB2, "description", "locationurl", null);
+     db1.setCatalogName(catName);
+     db2.setCatalogName(catName);
+     objectStore.createDatabase(db1);
+     objectStore.createDatabase(db2);
+ 
+     List<String> databases = objectStore.getAllDatabases(catName);
+     LOG.info("databases: " + databases);
+     Assert.assertEquals(2, databases.size());
+     Assert.assertEquals(DB1, databases.get(0));
+     Assert.assertEquals(DB2, databases.get(1));
+ 
+     objectStore.dropDatabase(catName, DB1);
+     databases = objectStore.getAllDatabases(catName);
+     Assert.assertEquals(1, databases.size());
+     Assert.assertEquals(DB2, databases.get(0));
+ 
+     objectStore.dropDatabase(catName, DB2);
+   }
+ 
+   /**
+    * Test table operations
+    */
+   @Test
+   public void testTableOps() throws MetaException, InvalidObjectException, NoSuchObjectException,
+       InvalidInputException {
+     Database db1 = new DatabaseBuilder()
+         .setName(DB1)
+         .setDescription("description")
+         .setLocation("locationurl")
+         .build(conf);
+     objectStore.createDatabase(db1);
+     StorageDescriptor sd1 =
+         new StorageDescriptor(ImmutableList.of(new FieldSchema("pk_col", "double", null)),
+             "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
+             null, null, null);
+     HashMap<String, String> params = new HashMap<>();
+     params.put("EXTERNAL", "false");
+     Table tbl1 =
+         new Table(TABLE1, DB1, "owner", 1, 2, 3, sd1, null, params, null, null, "MANAGED_TABLE");
+     objectStore.createTable(tbl1);
+ 
+     List<String> tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(1, tables.size());
+     Assert.assertEquals(TABLE1, tables.get(0));
+ 
+     StorageDescriptor sd2 =
+         new StorageDescriptor(ImmutableList.of(new FieldSchema("fk_col", "double", null)),
+             "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null),
+             null, null, null);
+     Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd2, null, params, null, null,
+         "MANAGED_TABLE");
+ 
+     // Change different fields and verify they were altered
+     newTbl1.setOwner("role1");
+     newTbl1.setOwnerType(PrincipalType.ROLE);
+ 
 -    objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1);
++    objectStore.alterTable(DEFAULT_CATALOG_NAME, DB1, TABLE1, newTbl1, -1, null);
+     tables = objectStore.getTables(DEFAULT_CATALOG_NAME, DB1, "new*");
+     Assert.assertEquals(1, tables.size());
+     Assert.assertEquals("new" + TABLE1, tables.get(0));
+ 
+     // Verify fields were altered during the alterTable operation
+     Table alteredTable = objectStore.getTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1);
+     Assert.assertEquals("Owner of table was not altered", newTbl1.getOwner(), alteredTable.getOwner());
+     Assert.assertEquals("Owner type of table was not altered", newTbl1.getOwnerType(), alteredTable.getOwnerType());
+ 
+     objectStore.createTable(tbl1);
+     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(2, tables.size());
+ 
+     List<SQLForeignKey> foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null);
+     Assert.assertEquals(0, foreignKeys.size());
+ 
+     SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1,
+         "pk_const_1", false, false, false);
+     pk.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPrimaryKeys(ImmutableList.of(pk));
+     SQLForeignKey fk = new SQLForeignKey(DB1, TABLE1, "pk_col",
+         DB1, "new" + TABLE1, "fk_col", 1,
+         0, 0, "fk_const_1", "pk_const_1", false, false, false);
+     objectStore.addForeignKeys(ImmutableList.of(fk));
+ 
+     // Retrieve from PK side
+     foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+     Assert.assertEquals(1, foreignKeys.size());
+ 
+     List<SQLForeignKey> fks = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+     if (fks != null) {
+       for (SQLForeignKey fkcol : fks) {
+         objectStore.dropConstraint(fkcol.getCatName(), fkcol.getFktable_db(), fkcol.getFktable_name(),
+             fkcol.getFk_name());
+       }
+     }
+     // Retrieve from FK side
+     foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null);
+     Assert.assertEquals(0, foreignKeys.size());
+     // Retrieve from PK side
+     foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, DB1, "new" + TABLE1);
+     Assert.assertEquals(0, foreignKeys.size());
+ 
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
+     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(1, tables.size());
+ 
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, "new" + TABLE1);
+     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     Assert.assertEquals(0, tables.size());
+ 
+     objectStore.dropDatabase(db1.getCatalogName(), DB1);
+   }
+ 
+   private StorageDescriptor createFakeSd(String location) {
+     return new StorageDescriptor(null, location, null, null, false, 0,
+         new SerDeInfo("SerDeName", "serializationLib", null), null, null, null);
+   }
+ 
+ 
+   /**
+    * Tests partition operations
+    */
+   @Test
+   public void testPartitionOps() throws MetaException, InvalidObjectException,
+       NoSuchObjectException, InvalidInputException {
+     Database db1 = new DatabaseBuilder()
+         .setName(DB1)
+         .setDescription("description")
+         .setLocation("locationurl")
+         .build(conf);
+     objectStore.createDatabase(db1);
+     StorageDescriptor sd = createFakeSd("location");
+     HashMap<String, String> tableParams = new HashMap<>();
+     tableParams.put("EXTERNAL", "false");
+     FieldSchema partitionKey1 = new FieldSchema("Country", ColumnType.STRING_TYPE_NAME, "");
+     FieldSchema partitionKey2 = new FieldSchema("State", ColumnType.STRING_TYPE_NAME, "");
+     Table tbl1 =
+         new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2),
+             tableParams, null, null, "MANAGED_TABLE");
+     objectStore.createTable(tbl1);
+     HashMap<String, String> partitionParams = new HashMap<>();
+     partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true");
+     List<String> value1 = Arrays.asList("US", "CA");
+     Partition part1 = new Partition(value1, DB1, TABLE1, 111, 111, sd, partitionParams);
+     part1.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(part1);
+     List<String> value2 = Arrays.asList("US", "MA");
+     Partition part2 = new Partition(value2, DB1, TABLE1, 222, 222, sd, partitionParams);
+     part2.setCatName(DEFAULT_CATALOG_NAME);
+     objectStore.addPartition(part2);
+ 
+     Deadline.startTimer("getPartition");
+     List<Partition> partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10);
+     Assert.assertEquals(2, partitions.size());
+     Assert.assertEquals(111, partitions.get(0).getCreateTime());
+     Assert.assertEquals(222, partitions.get(1).getCreateTime());
+ 
+     int numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "");
+     Assert.assertEquals(partitions.size(), numPartitions);
+ 
+     numPartitions = objectStore.getNumPartitionsByFilter(DEFAULT_CATALOG_NAME, DB1, TABLE1, "country = \"US\"");
+     Assert.assertEquals(2, numPartitions);
+ 
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value1);
+     partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10);
+     Assert.assertEquals(1, partitions.size());
+     Assert.assertEquals(222, partitions.get(0).getCreateTime());
+ 
+     objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value2);
+     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
+     objectStore.dropDatabase(db1.getCatalogName(), DB1);
+   }
+ 
+   /**
+    * Checks if the JDO cache is able to handle directSQL partition drops in one session.
+    * @throws MetaException
+    * @throws InvalidObjectException
+    * @throws NoSuchObjectException
+    * @throws InvalidInputException
+    */
+   @Test
+   public void testDirectSQLDropPartitionsCacheInSession()
+       throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+     createPartitionedTable(false, false);
+     // query the partitions with JDO
+     Deadline.startTimer("getPartition");
+     List<Partition> partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(3, partitions.size());
+ 
+     // drop partitions with directSql
+     objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false);
+ 
+     // query the partitions with JDO, checking the cache is not causing any problem
+     partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(1, partitions.size());
+   }
+ 
+   /**
+    * Checks if the JDO cache is able to handle directSQL partition drops across sessions.
+    * @throws MetaException
+    * @throws InvalidObjectException
+    * @throws NoSuchObjectException
+    * @throws InvalidInputException
+    */
+   @Test
+   public void testDirectSQLDropPartitionsCacheCrossSession()
+       throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+     ObjectStore objectStore2 = new ObjectStore();
+     objectStore2.setConf(conf);
+ 
+     createPartitionedTable(false, false);
+     // query the partitions with JDO in the 1st session
+     Deadline.startTimer("getPartition");
+     List<Partition> partitions = objectStore.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(3, partitions.size());
+ 
+     // query the partitions with JDO in the 2nd session
+     partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1, 10,
+         false, true);
+     Assert.assertEquals(3, partitions.size());
+ 
+     // drop partitions with directSql in the 1st session
+     objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         Arrays.asList("test_part_col=a0", "test_part_col=a1"), true, false);
+ 
+     // query the partitions with JDO in the 2nd session, checking the cache is not causing any
+     // problem
+     partitions = objectStore2.getPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         10, false, true);
+     Assert.assertEquals(1, partitions.size());
+   }
+ 
+   /**
+    * Checks if the directSQL partition drop removes all connected data from the RDBMS tables.
+    * @throws MetaException
+    * @throws InvalidObjectException
+    * @throws NoSuchObjectException
+    * @throws SQLException
+    */
+   @Test
+   public void testDirectSQLDropParitionsCleanup() throws MetaException, InvalidObjectException,
+       NoSuchObjectException, SQLException, InvalidInputException {
+ 
+     createPartitionedTable(true, true);
+ 
+     // Check, that every table in the expected state before the drop
+     checkBackendTableSize("PARTITIONS", 3);
+     checkBackendTableSize("PART_PRIVS", 3);
+     checkBackendTableSize("PART_COL_PRIVS", 3);
+     checkBackendTableSize("PART_COL_STATS", 3);
+     checkBackendTableSize("PARTITION_PARAMS", 3);
+     checkBackendTableSize("PARTITION_KEY_VALS", 3);
+     checkBackendTableSize("SD_PARAMS", 3);
+     checkBackendTableSize("BUCKETING_COLS", 3);
+     checkBackendTableSize("SKEWED_COL_NAMES", 3);
+     checkBackendTableSize("SDS", 4); // Table has an SDS
+     checkBackendTableSize("SORT_COLS", 3);
+     checkBackendTableSize("SERDE_PARAMS", 3);
+     checkBackendTableSize("SERDES", 4); // Table has a serde
+ 
+     // drop the partitions
+     Deadline.startTimer("dropPartitions");
+     objectStore.dropPartitionsInternal(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+         Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2"), true, false);
+ 
+     // Check that all data connected to the partitions has been dropped
+     checkBackendTableSize("PARTITIONS", 0);
+     checkBackendTableSize("PART_PRIVS", 0);
+     checkBackendTableSize("PART_COL_PRIVS", 0);
+     checkBackendTableSize("PART_COL_STATS", 0);
+     checkBackendTableSize("PARTITION_PARAMS", 0);
+     checkBackendTableSize("PARTITION_KEY_VALS", 0);
+     checkBackendTableSize("SD_PARAMS", 0);
+     checkBackendTableSize("BUCKETING_COLS", 0);
+     checkBackendTableSize("SKEWED_COL_NAMES", 0);
+     checkBackendTableSize("SDS", 1); // Table has an SDS
+     checkBackendTableSize("SORT_COLS", 0);
+     checkBackendTableSize("SERDE_PARAMS", 0);
+     checkBackendTableSize("SERDES", 1); // Table has a serde
+   }
+ 
+   /**
+    * Creates DB1 database, TABLE1 table with 3 partitions.
+    * @param withPrivileges Should we create privileges as well
+    * @param withStatistics Should we create statistics as well
+    * @throws MetaException
+    * @throws InvalidObjectException
+    */
+   private void createPartitionedTable(boolean withPrivileges, boolean withStatistics)
+       throws MetaException, InvalidObjectException, NoSuchObjectException, InvalidInputException {
+     Database db1 = new DatabaseBuilder()
+                        .setName(DB1)
+                        .setDescription("description")
+                        .setLocation("locationurl")
+                        .build(conf);
+     objectStore.createDatabase(db1);
+     Table tbl1 =
+         new TableBuilder()
+             .setDbName(DB1)
+             .setTableName(TABLE1)
+             .addCol("test_col1", "int")
+             .addCol("test_col2", "int")
+             .addPartCol("test_part_col", "int")
+             .addCol("test_bucket_col", "int", "test bucket col comment")
+             .addCol("test_skewed_col", "int", "test skewed col comment")
+             .addCol("test_sort_col", "int", "test sort col comment")
+             .build(conf);
+     objectStore.createTable(tbl1);
+ 
+     PrivilegeBag privilegeBag = new PrivilegeBag();
+     // Create partitions for the partitioned table
+     for(int i=0; i < 3; i++) {
+       Partition part = new PartitionBuilder()
+                            .inTable(tbl1)
+                            .addValue("a" + i)
+                            .addSerdeParam("serdeParam", "serdeParamValue")
+                            .addStorageDescriptorParam("sdParam", "sdParamValue")
+                            .addBucketCol("test_bucket_col")
+                            .addSkewedColName("test_skewed_col")
+                            .addSortCol("test_sort_col", 1)
+                            .build(conf);
+       objectStore.addPartition(part);
+ 
+       if (withPrivileges) {
+         HiveObjectRef partitionReference = new HiveObjectRefBuilder().buildPartitionReference(part);
+         HiveObjectRef partitionColumnReference = new HiveObjectRefBuilder()
+             .buildPartitionColumnReference(tbl1, "test_part_col", part.getValues());
+         PrivilegeGrantInfo privilegeGrantInfo = new PrivilegeGrantInfoBuilder()
+             .setPrivilege("a")
+             .build();
+         HiveObjectPrivilege partitionPriv = new HiveObjectPrivilegeBuilder()
+                                                 .setHiveObjectRef(partitionReference)
+                                                 .setPrincipleName("a")
+                                                 .setPrincipalType(PrincipalType.USER)
+                                                 .setGrantInfo(privilegeGrantInfo)
+                                                 .build();
+         privilegeBag.addToPrivileges(partitionPriv);
+         HiveObjectPrivilege partitionColPriv = new HiveObjectPrivilegeBuilder()
+                                                    .setHiveObjectRef(partitionColumnReference)
+                                                    .setPrincipleName("a")
+                                                    .setPrincipalType(PrincipalType.USER)
+                                                    .setGrantInfo(privilegeGrantInfo)
+                                                    .build();
+         privilegeBag.addToPrivileges(partitionColPriv);
+       }
+ 
+       if (withStatistics) {
+         ColumnStatistics stats = new ColumnStatistics();
+         ColumnStatisticsDesc desc = new ColumnStatisticsDesc();
+         desc.setCatName(tbl1.getCatName());
+         desc.setDbName(tbl1.getDbName());
+         desc.setTableName(tbl1.getTableName());
+         desc.setPartName("test_part_col=a" + i);
+         stats.setStatsDesc(desc);
+ 
+         List<ColumnStatisticsObj> statsObjList = new ArrayList<>(1);
+         stats.setStatsObj(statsObjList);
+ 
+         ColumnStatisticsData data = new ColumnStatisticsData();
+         BooleanColumnStatsData boolStats = new BooleanColumnStatsData();
+         boolStats.setNumTrues(0);
+         boolStats.setNumFalses(0);
+         boolStats.setNumNulls(0);
+         data.setBooleanStats(boolStats);
+ 
+         ColumnStatisticsObj partStats = new ColumnStatisticsObj("test_part_col", "int", data);
+         statsObjList.add(partStats);
+ 
+         objectStore.updatePartitionColumnStatistics(stats, part.getValues());
+       }
+     }
+     if (withPrivileges) {
+       objectStore.grantPrivileges(privilegeBag);
+     }
+   }
+ 
+   /**
+    * Checks that the given HMS backend table contains the expected number of rows.
+    * If it does not, an {@link AssertionError} is thrown.
+    * @param tableName The table in which we count the rows
+    * @param size The expected row number
+    * @throws SQLException If there is a problem connecting to / querying the backend DB
+    */
+   private void checkBackendTableSize(String tableName, int size) throws SQLException {
+     String connectionStr = MetastoreConf.getVar(conf, MetastoreConf.ConfVars.CONNECT_URL_KEY);
+     Connection conn = DriverManager.getConnection(connectionStr);
+     Statement stmt = conn.createStatement();
+ 
+     ResultSet rs = stmt.executeQuery("SELECT COUNT(1) FROM " + tableName);
+     rs.next();
+     Assert.assertEquals(tableName + " table should contain " + size + " rows", size,
+         rs.getLong(1));
+   }
+ 
+   /**
+    * Test master keys operation
+    */
+   @Test
+   public void testMasterKeyOps() throws MetaException, NoSuchObjectException {
+     int id1 = objectStore.addMasterKey(KEY1);
+     int id2 = objectStore.addMasterKey(KEY2);
+ 
+     String[] keys = objectStore.getMasterKeys();
+     Assert.assertEquals(2, keys.length);
+     Assert.assertEquals(KEY1, keys[0]);
+     Assert.assertEquals(KEY2, keys[1]);
+ 
+     objectStore.updateMasterKey(id1, "new" + KEY1);
+     objectStore.updateMasterKey(id2, "new" + KEY2);
+     keys = objectStore.getMasterKeys();
+     Assert.assertEquals(2, keys.length);
+     Assert.assertEquals("new" + KEY1, keys[0]);
+     Assert.assertEquals("new" + KEY2, keys[1]);
+ 
+     objectStore.removeMasterKey(id1);
+     keys = objectStore.getMasterKeys();
+     Assert.assertEquals(1, keys.length);
+     Assert.assertEquals("new" + KEY2, keys[0]);
+ 
+     objectStore.removeMasterKey(id2);
+   }
+ 
+   /**
+    * Test role operation
+    */
+   @Test
+   public void testRoleOps() throws InvalidObjectException, MetaException, NoSuchObjectException {
+     objectStore.addRole(ROLE1, OWNER);
+     objectStore.addRole(ROLE2, OWNER);
+     List<String> roles = objectStore.listRoleNames();
+     Assert.assertEquals(2, roles.size());
+     Assert.assertEquals(ROLE2, roles.get(1));
+     Role role1 = objectStore.getRole(ROLE1);
+     Assert.assertEquals(OWNER, role1.getOwnerName());
+     objectStore.grantRole(role1, USER1, PrincipalType.USER, OWNER, PrincipalType.ROLE, true);
+     objectStore.revokeRole(role1, USER1, PrincipalType.USER, false);
+     objectStore.removeRole(ROLE1);
+   }
+ 
+   @Test
+   public void testDirectSqlErrorMetrics() throws Exception {
+     Configuration conf = MetastoreConf.newMetastoreConf();
+     MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.METRICS_ENABLED, true);
+     Metrics.initialize(conf);
+     MetastoreConf.setVar(conf, MetastoreConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES,
+         "org.apache.hadoop.hive.common.metrics.metrics2.JsonFileMetricsReporter, " +
+             "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter"
+     );
+ 
+     // recall setUp() so that we get an object store with the metrics initialized
+     setUp();
+     Counter directSqlErrors =
+         Metrics.getRegistry().getCounters().get(MetricsConstants.DIRECTSQL_ERRORS);
+ 
+     objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) {
+       @Override
+       protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
+         return null;
+       }
+ 
+       @Override
+       protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
+           NoSuchObjectException {
+         return null;
+       }
+     }.run(false);
+ 
+     Assert.assertEquals(0, directSqlErrors.getCount());
+ 
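+     // Force the direct SQL path to fail; GetHelper falls back to JDO and bumps the error counter.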
+     objectStore.new GetDbHelper(DEFAULT_CATALOG_NAME, "foo", true, true) {
+       @Override
+       protected Database getSqlResult(ObjectStore.GetHelper<Database> ctx) throws MetaException {
+         throw new RuntimeException();
+       }
+ 
+       @Override
+       protected Database getJdoResult(ObjectStore.GetHelper<Database> ctx) throws MetaException,
+           NoSuchObjectException {
+         return null;
+       }
+     }.run(false);
+ 
+     Assert.assertEquals(1, directSqlErrors.getCount());
+   }
+ 
+   private static void dropAllStoreObjects(RawStore store)
+       throws MetaException, InvalidObjectException, InvalidInputException {
+     try {
+       Deadline.registerIfNot(100000);
+       List<Function> functions = store.getAllFunctions(DEFAULT_CATALOG_NAME);
+       for (Function func : functions) {
+         store.dropFunction(DEFAULT_CATALOG_NAME, func.getDbName(), func.getFunctionName());
+       }
+       for (String catName : store.getCatalogs()) {
+         List<String> dbs = store.getAllDatabases(catName);
+         for (String db : dbs) {
+           List<String> tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db);
+           for (String tbl : tbls) {
+             Deadline.startTimer("getPartition");
+             List<Partition> parts = store.getPartitions(DEFAULT_CATALOG_NAME, db, tbl, 100);
+             for (Partition part : parts) {
+               store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, part.getValues());
+             }
+             // Find any constraints and drop them
+             Set<String> constraints = new HashSet<>();
+             List<SQLPrimaryKey> pk = store.getPrimaryKeys(DEFAULT_CATALOG_NAME, db, tbl);
+             if (pk != null) {
+               for (SQLPrimaryKey pkcol : pk) {
+                 constraints.add(pkcol.getPk_name());
+               }
+             }
+             List<SQLForeignKey> fks = store.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, db, tbl);
+             if (fks != null) {
+               for (SQLForeignKey fkcol : fks) {
+                 constraints.add(fkcol.getFk_name());
+               }
+             }
+             for (String constraint : constraints) {
+               store.dropConstraint(DEFAULT_CATALOG_NAME, db, tbl, constraint);
+             }
+             store.dropTable(DEFAULT_CATALOG_NAME, db, tbl);
+           }
+           store.dropDatabase(catName, db);
+         }
+         store.dropCatalog(catName);
+       }
+       List<String> roles = store.listRoleNames();
+       for (String role : roles) {
+         store.removeRole(role);
+       }
+     } catch (NoSuchObjectException e) {
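+       // Best-effort cleanup: ignore objects that are already gone.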
+     }
+   }
+ 
+   @Test
+   public void testQueryCloseOnError() throws Exception {
+     ObjectStore spy = Mockito.spy(objectStore);
+     spy.getAllDatabases(DEFAULT_CATALOG_NAME);
+     spy.getAllFunctions(DEFAULT_CATALOG_NAME);
+     spy.getAllTables(DEFAULT_CATALOG_NAME, DB1);
+     spy.getPartitionCount();
+     Mockito.verify(spy, Mockito.times(3))
+         .rollbackAndCleanup(Mockito.anyBoolean(), Mockito.<Query>anyObject());
+   }
+ 
+   @Test
+   public void testRetryingExecutorSleep() throws Exception {
+     RetryingExecutor re = new ObjectStore.RetryingExecutor(MetastoreConf.newMetastoreConf(), null);
+     Assert.assertTrue("invalid sleep value", re.getSleepInterval() >= 0);
+   }
+ 
+   @Ignore // See comment in ObjectStore.getDataSourceProps
+   @Test
+   public void testNonConfDatanucleusValueSet() {
+     String key = "datanucleus.no.such.key";
+     String value = "test_value";
+     String key1 = "blabla.no.such.key";
+     String value1 = "another_value";
+     Assume.assumeTrue(System.getProperty(key) == null);
+     Configuration localConf = MetastoreConf.newMetastoreConf();
+     MetaStoreTestUtils.setConfForStandloneMode(localConf);
+     localConf.set(key, value);
+     localConf.set(key1, value1);
+     objectStore = new ObjectStore();
+     objectStore.setConf(localConf);
+     Assert.assertEquals(value, objectStore.getProp().getProperty(key));
+     Assert.assertNull(objectStore.getProp().getProperty(key1));
+   }
+ 
+   /**
+    * Test notification operations
+    */
+   // TODO MS-SPLIT uncomment once we move EventMessage over
+   @Test
+   public void testNotificationOps() throws InterruptedException, MetaException {
+     final int NO_EVENT_ID = 0;
+     final int FIRST_EVENT_ID = 1;
+     final int SECOND_EVENT_ID = 2;
+ 
+     NotificationEvent event =
+         new NotificationEvent(0, 0, EventMessage.EventType.CREATE_DATABASE.toString(), "");
+     NotificationEventResponse eventResponse;
+     CurrentNotificationEventId eventId;
+ 
+     // Verify that there are no notifications available yet
+     eventId = objectStore.getCurrentNotificationEventId();
+     Assert.assertEquals(NO_EVENT_ID, eventId.getEventId());
+ 
+     // Verify that addNotificationEvent() updates the NotificationEvent with the new event ID
+     objectStore.addNotificationEvent(event);
+     Assert.assertEquals(FIRST_EVENT_ID, event.getEventId());
+     objectStore.addNotificationEvent(event);
+     Assert.assertEquals(SECOND_EVENT_ID, event.getEventId());
+ 
+     // Verify that objectStore fetches the latest notification event ID
+     eventId = objectStore.getCurrentNotificationEventId();
+     Assert.assertEquals(SECOND_EVENT_ID, eventId.getEventId());
+ 
+     // Verify that getNextNotification() returns all events
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+     Assert.assertEquals(2, eventResponse.getEventsSize());
+     Assert.assertEquals(FIRST_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+     Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(1).getEventId());
+ 
+     // Verify that getNextNotification(last) returns events after a specified event
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest(FIRST_EVENT_ID));
+     Assert.assertEquals(1, eventResponse.getEventsSize());
+     Assert.assertEquals(SECOND_EVENT_ID, eventResponse.getEvents().get(0).getEventId());
+ 
+     // Verify that getNextNotification(last) returns zero events if there are no more notifications available
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest(SECOND_EVENT_ID));
+     Assert.assertEquals(0, eventResponse.getEventsSize());
+ 
+     // Verify that cleanNotificationEvents() cleans up all old notifications
+     Thread.sleep(1);
+     objectStore.cleanNotificationEvents(1);
+     eventResponse = objectStore.getNextNotification(new NotificationEventRequest());
+     Assert.assertEquals(0, eventResponse.getEventsSize());
+   }
+ 
+   @Ignore(
+       "This test is here to allow testing with other databases like MySQL / PostgreSQL etc.\n"
+           + " with user changes to the code. It cannot be run on Apache Derby because of\n"
+           + " https://db.apache.org/derby/docs/10.10/devguide/cdevconcepts842385.html"
+   )
+   @Test
+   public void testConcurrentAddNotifications() throws ExecutionException, InterruptedException, MetaException {
+ 
+     final int NUM_THREADS = 10;
+     CyclicBarrier cyclicBarrier = new CyclicBarrier(NUM_THREADS,
+         () -> LoggerFactory.getLogger("test")
+             .debug(NUM_THREADS + " threads going to add notification"));
+ 
+     Configuration conf = MetastoreConf.newMetastoreConf();
+     MetaStoreTestUtils.setConfForStandloneMode(conf);
+     /*
+        Below are the properties that need to be set based on which database this test is
+        going to be run against.
+      */
+ 
+ //    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_DRIVER, "com.mysql.jdbc.Driver");
+ //    conf.setVar(HiveConf.ConfVars.METASTORECONNECTURLKEY,
+ //        "jdbc:mysql://localhost:3306/metastore_db");
+ //    conf.setVar(HiveConf.ConfVars.METASTORE_CONNECTION_USER_NAME, "");
+ //    conf.setVar(HiveConf.ConfVars.METASTOREPWD, "");
+ 
+     /*
+      We have to add this one manually: for tests the db is initialized via MetaStoreDirectSql,
+      and we don't run the schema creation SQL that includes an insert for notification_sequence,
+      which can be locked. The entry in notification_sequence happens via notification_event insertion.
+     */
+     objectStore.getPersistenceManager().newQuery(MNotificationLog.class, "eventType==''").execute();
+     objectStore.getPersistenceManager().newQuery(MNotificationNextId.class, "nextEventId==-1").execute();
+ 
+     objectStore.addNotificationEvent(
+         new NotificationEvent(0, 0,
+             EventMessage.EventType.CREATE_DATABASE.toString(),
+             "CREATE DATABASE DB initial"));
+ 
+     ExecutorService executorService = Executors.newFixedThreadPool(NUM_THREADS);
+     for (int i = 0; i < NUM_THREADS; i++) {
+       final int n = i;
+ 
+       executorService.execute(
+           () -> {
+             ObjectStore store = new ObjectStore();
+             store.setConf(conf);
+ 
+             String eventType = EventMessage.EventType.CREATE_DATABASE.toString();
+             NotificationEvent dbEvent =
+                 new NotificationEvent(0, 0, eventType,
+                     "CREATE DATABASE DB" + n);
+             System.out.println("ADDING NOTIFICATION");
+ 
+             try {
+               cyclicBarrier.await();
+               store.addNotificationEvent(dbEvent);
+             } catch (InterruptedException | BrokenBarrierException | MetaException e) {
+               throw new RuntimeException(e);
+             }
+             System.out.println("FINISH NOTIFICATION");
+           });
+     }
+     executorService.shutdown();
+     Assert.assertTrue(executorService.awaitTermination(15, TimeUnit.SECONDS));
+ 
+     // We have to set this up again because the underlying PMF keeps getting reinitialized and the
+     // original reference is closed.
+     ObjectStore store = new ObjectStore();
+     store.setConf(conf);
+ 
+     NotificationEventResponse eventResponse = store.getNextNotification(
+         new NotificationEventRequest());
+     Assert.assertEquals(NUM_THREADS + 1, eventResponse.getEventsSize());
+     long previousId = 0;
+     for (NotificationEvent event : eventResponse.getEvents()) {
+       Assert.assertTrue("previous:" + previousId + " current:" + event.getEventId(),
+           previousId < event.getEventId());
+       Assert.assertTrue(previousId + 1 == event.getEventId());
+       previousId = event.getEventId();
+     }
+   }
+ 
+   private void createTestCatalog(String catName) throws MetaException {
+     Catalog cat = new CatalogBuilder()
+         .setName(catName)
+         .setLocation("/tmp")
+         .build();
+     objectStore.createCatalog(cat);
+   }
+ }
+ 
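
The notification tests above pin down the incremental-fetch contract: event IDs increase
monotonically, and getNextNotification(last) returns only events created after the given ID.
For reference, a minimal consumer sketch against the same ObjectStore API, assuming the
standalone test configuration used throughout this class (the bookkeeping is illustrative,
not part of the patch):

    Configuration conf = MetastoreConf.newMetastoreConf();
    MetaStoreTestUtils.setConfForStandloneMode(conf);
    ObjectStore store = new ObjectStore();
    store.setConf(conf);

    // Start from the newest known event, then fetch only what arrives afterwards.
    // (MetaException from the store propagates to the caller.)
    long lastSeenId = store.getCurrentNotificationEventId().getEventId();
    NotificationEventResponse resp =
        store.getNextNotification(new NotificationEventRequest(lastSeenId));
    if (resp.getEventsSize() > 0) {                  // size is 0 when nothing new arrived
      for (NotificationEvent e : resp.getEvents()) {
        lastSeenId = e.getEventId();                 // IDs are strictly increasing
      }
    }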


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsRequest.java
new file mode 100644
index 0000000..2ba0407
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsRequest.java
@@ -0,0 +1,591 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CheckConstraintsRequest implements org.apache.thrift.TBase<CheckConstraintsRequest, CheckConstraintsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CheckConstraintsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CheckConstraintsRequest");
+
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CheckConstraintsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CheckConstraintsRequestTupleSchemeFactory());
+  }
+
+  private String catName; // required
+  private String db_name; // required
+  private String tbl_name; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CAT_NAME((short)1, "catName"),
+    DB_NAME((short)2, "db_name"),
+    TBL_NAME((short)3, "tbl_name");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CAT_NAME
+          return CAT_NAME;
+        case 2: // DB_NAME
+          return DB_NAME;
+        case 3: // TBL_NAME
+          return TBL_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CheckConstraintsRequest.class, metaDataMap);
+  }
+
+  public CheckConstraintsRequest() {
+  }
+
+  public CheckConstraintsRequest(
+    String catName,
+    String db_name,
+    String tbl_name)
+  {
+    this();
+    this.catName = catName;
+    this.db_name = db_name;
+    this.tbl_name = tbl_name;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CheckConstraintsRequest(CheckConstraintsRequest other) {
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+    if (other.isSetDb_name()) {
+      this.db_name = other.db_name;
+    }
+    if (other.isSetTbl_name()) {
+      this.tbl_name = other.tbl_name;
+    }
+  }
+
+  public CheckConstraintsRequest deepCopy() {
+    return new CheckConstraintsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.catName = null;
+    this.db_name = null;
+    this.tbl_name = null;
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public String getDb_name() {
+    return this.db_name;
+  }
+
+  public void setDb_name(String db_name) {
+    this.db_name = db_name;
+  }
+
+  public void unsetDb_name() {
+    this.db_name = null;
+  }
+
+  /** Returns true if field db_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetDb_name() {
+    return this.db_name != null;
+  }
+
+  public void setDb_nameIsSet(boolean value) {
+    if (!value) {
+      this.db_name = null;
+    }
+  }
+
+  public String getTbl_name() {
+    return this.tbl_name;
+  }
+
+  public void setTbl_name(String tbl_name) {
+    this.tbl_name = tbl_name;
+  }
+
+  public void unsetTbl_name() {
+    this.tbl_name = null;
+  }
+
+  /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */
+  public boolean isSetTbl_name() {
+    return this.tbl_name != null;
+  }
+
+  public void setTbl_nameIsSet(boolean value) {
+    if (!value) {
+      this.tbl_name = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDb_name();
+      } else {
+        setDb_name((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTbl_name();
+      } else {
+        setTbl_name((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CAT_NAME:
+      return getCatName();
+
+    case DB_NAME:
+      return getDb_name();
+
+    case TBL_NAME:
+      return getTbl_name();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CAT_NAME:
+      return isSetCatName();
+    case DB_NAME:
+      return isSetDb_name();
+    case TBL_NAME:
+      return isSetTbl_name();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CheckConstraintsRequest)
+      return this.equals((CheckConstraintsRequest)that);
+    return false;
+  }
+
+  public boolean equals(CheckConstraintsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    boolean this_present_db_name = true && this.isSetDb_name();
+    boolean that_present_db_name = true && that.isSetDb_name();
+    if (this_present_db_name || that_present_db_name) {
+      if (!(this_present_db_name && that_present_db_name))
+        return false;
+      if (!this.db_name.equals(that.db_name))
+        return false;
+    }
+
+    boolean this_present_tbl_name = true && this.isSetTbl_name();
+    boolean that_present_tbl_name = true && that.isSetTbl_name();
+    if (this_present_tbl_name || that_present_tbl_name) {
+      if (!(this_present_tbl_name && that_present_tbl_name))
+        return false;
+      if (!this.tbl_name.equals(that.tbl_name))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    boolean present_db_name = true && (isSetDb_name());
+    list.add(present_db_name);
+    if (present_db_name)
+      list.add(db_name);
+
+    boolean present_tbl_name = true && (isSetTbl_name());
+    list.add(present_tbl_name);
+    if (present_tbl_name)
+      list.add(tbl_name);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CheckConstraintsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDb_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTbl_name()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CheckConstraintsRequest(");
+    boolean first = true;
+
+    sb.append("catName:");
+    if (this.catName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.catName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("db_name:");
+    if (this.db_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.db_name);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tbl_name:");
+    if (this.tbl_name == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tbl_name);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetCatName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'catName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDb_name()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'db_name' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTbl_name()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tbl_name' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CheckConstraintsRequestStandardSchemeFactory implements SchemeFactory {
+    public CheckConstraintsRequestStandardScheme getScheme() {
+      return new CheckConstraintsRequestStandardScheme();
+    }
+  }
+
+  private static class CheckConstraintsRequestStandardScheme extends StandardScheme<CheckConstraintsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CheckConstraintsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.db_name = iprot.readString();
+              struct.setDb_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tbl_name = iprot.readString();
+              struct.setTbl_nameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CheckConstraintsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.catName != null) {
+        oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+        oprot.writeString(struct.catName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.db_name != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.db_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tbl_name != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tbl_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CheckConstraintsRequestTupleSchemeFactory implements SchemeFactory {
+    public CheckConstraintsRequestTupleScheme getScheme() {
+      return new CheckConstraintsRequestTupleScheme();
+    }
+  }
+
+  private static class CheckConstraintsRequestTupleScheme extends TupleScheme<CheckConstraintsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.catName);
+      oprot.writeString(struct.db_name);
+      oprot.writeString(struct.tbl_name);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.catName = iprot.readString();
+      struct.setCatNameIsSet(true);
+      struct.db_name = iprot.readString();
+      struct.setDb_nameIsSet(true);
+      struct.tbl_name = iprot.readString();
+      struct.setTbl_nameIsSet(true);
+    }
+  }
+
+}
+
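
For reference, a brief usage sketch for the generated CheckConstraintsRequest above. All three
fields are REQUIRED at the Thrift level, so validate() rejects a partially populated request;
the catalog/db/table names here are illustrative only:

    CheckConstraintsRequest req =
        new CheckConstraintsRequest("hive", "default", "web_logs"); // catName, db_name, tbl_name
    req.validate();  // passes: every required field is set

    CheckConstraintsRequest partial = new CheckConstraintsRequest();
    partial.setDb_name("default");
    partial.setTbl_name("web_logs");
    // partial.validate() throws TProtocolException: "Required field 'catName' is unset! ..."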

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java
new file mode 100644
index 0000000..8d4f7be
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckConstraintsResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CheckConstraintsResponse implements org.apache.thrift.TBase<CheckConstraintsResponse, CheckConstraintsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<CheckConstraintsResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CheckConstraintsResponse");
+
+  private static final org.apache.thrift.protocol.TField CHECK_CONSTRAINTS_FIELD_DESC = new org.apache.thrift.protocol.TField("checkConstraints", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CheckConstraintsResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CheckConstraintsResponseTupleSchemeFactory());
+  }
+
+  private List<SQLCheckConstraint> checkConstraints; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    CHECK_CONSTRAINTS((short)1, "checkConstraints");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // CHECK_CONSTRAINTS
+          return CHECK_CONSTRAINTS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.CHECK_CONSTRAINTS, new org.apache.thrift.meta_data.FieldMetaData("checkConstraints", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLCheckConstraint.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CheckConstraintsResponse.class, metaDataMap);
+  }
+
+  public CheckConstraintsResponse() {
+  }
+
+  public CheckConstraintsResponse(
+    List<SQLCheckConstraint> checkConstraints)
+  {
+    this();
+    this.checkConstraints = checkConstraints;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CheckConstraintsResponse(CheckConstraintsResponse other) {
+    if (other.isSetCheckConstraints()) {
+      List<SQLCheckConstraint> __this__checkConstraints = new ArrayList<SQLCheckConstraint>(other.checkConstraints.size());
+      for (SQLCheckConstraint other_element : other.checkConstraints) {
+        __this__checkConstraints.add(new SQLCheckConstraint(other_element));
+      }
+      this.checkConstraints = __this__checkConstraints;
+    }
+  }
+
+  public CheckConstraintsResponse deepCopy() {
+    return new CheckConstraintsResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.checkConstraints = null;
+  }
+
+  public int getCheckConstraintsSize() {
+    return (this.checkConstraints == null) ? 0 : this.checkConstraints.size();
+  }
+
+  public java.util.Iterator<SQLCheckConstraint> getCheckConstraintsIterator() {
+    return (this.checkConstraints == null) ? null : this.checkConstraints.iterator();
+  }
+
+  public void addToCheckConstraints(SQLCheckConstraint elem) {
+    if (this.checkConstraints == null) {
+      this.checkConstraints = new ArrayList<SQLCheckConstraint>();
+    }
+    this.checkConstraints.add(elem);
+  }
+
+  public List<SQLCheckConstraint> getCheckConstraints() {
+    return this.checkConstraints;
+  }
+
+  public void setCheckConstraints(List<SQLCheckConstraint> checkConstraints) {
+    this.checkConstraints = checkConstraints;
+  }
+
+  public void unsetCheckConstraints() {
+    this.checkConstraints = null;
+  }
+
+  /** Returns true if field checkConstraints is set (has been assigned a value) and false otherwise */
+  public boolean isSetCheckConstraints() {
+    return this.checkConstraints != null;
+  }
+
+  public void setCheckConstraintsIsSet(boolean value) {
+    if (!value) {
+      this.checkConstraints = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case CHECK_CONSTRAINTS:
+      if (value == null) {
+        unsetCheckConstraints();
+      } else {
+        setCheckConstraints((List<SQLCheckConstraint>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case CHECK_CONSTRAINTS:
+      return getCheckConstraints();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case CHECK_CONSTRAINTS:
+      return isSetCheckConstraints();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CheckConstraintsResponse)
+      return this.equals((CheckConstraintsResponse)that);
+    return false;
+  }
+
+  public boolean equals(CheckConstraintsResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_checkConstraints = true && this.isSetCheckConstraints();
+    boolean that_present_checkConstraints = true && that.isSetCheckConstraints();
+    if (this_present_checkConstraints || that_present_checkConstraints) {
+      if (!(this_present_checkConstraints && that_present_checkConstraints))
+        return false;
+      if (!this.checkConstraints.equals(that.checkConstraints))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_checkConstraints = true && (isSetCheckConstraints());
+    list.add(present_checkConstraints);
+    if (present_checkConstraints)
+      list.add(checkConstraints);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CheckConstraintsResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetCheckConstraints()).compareTo(other.isSetCheckConstraints());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCheckConstraints()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.checkConstraints, other.checkConstraints);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CheckConstraintsResponse(");
+    boolean first = true;
+
+    sb.append("checkConstraints:");
+    if (this.checkConstraints == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.checkConstraints);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetCheckConstraints()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'checkConstraints' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CheckConstraintsResponseStandardSchemeFactory implements SchemeFactory {
+    public CheckConstraintsResponseStandardScheme getScheme() {
+      return new CheckConstraintsResponseStandardScheme();
+    }
+  }
+
+  private static class CheckConstraintsResponseStandardScheme extends StandardScheme<CheckConstraintsResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CheckConstraintsResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // CHECK_CONSTRAINTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list360 = iprot.readListBegin();
+                struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list360.size);
+                SQLCheckConstraint _elem361;
+                for (int _i362 = 0; _i362 < _list360.size; ++_i362)
+                {
+                  _elem361 = new SQLCheckConstraint();
+                  _elem361.read(iprot);
+                  struct.checkConstraints.add(_elem361);
+                }
+                iprot.readListEnd();
+              }
+              struct.setCheckConstraintsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CheckConstraintsResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.checkConstraints != null) {
+        oprot.writeFieldBegin(CHECK_CONSTRAINTS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.checkConstraints.size()));
+          for (SQLCheckConstraint _iter363 : struct.checkConstraints)
+          {
+            _iter363.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CheckConstraintsResponseTupleSchemeFactory implements SchemeFactory {
+    public CheckConstraintsResponseTupleScheme getScheme() {
+      return new CheckConstraintsResponseTupleScheme();
+    }
+  }
+
+  private static class CheckConstraintsResponseTupleScheme extends TupleScheme<CheckConstraintsResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.checkConstraints.size());
+        for (SQLCheckConstraint _iter364 : struct.checkConstraints)
+        {
+          _iter364.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CheckConstraintsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list365 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.checkConstraints = new ArrayList<SQLCheckConstraint>(_list365.size);
+        SQLCheckConstraint _elem366;
+        for (int _i367 = 0; _i367 < _list365.size; ++_i367)
+        {
+          _elem366 = new SQLCheckConstraint();
+          _elem366.read(iprot);
+          struct.checkConstraints.add(_elem366);
+        }
+      }
+      struct.setCheckConstraintsIsSet(true);
+    }
+  }
+
+}
+
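
CheckConstraintsResponse mirrors the request but carries a single REQUIRED list field, so even
an empty result must be set explicitly before validate() passes. A short sketch of the helper
methods generated above (illustrative, not part of the generated file):

    CheckConstraintsResponse resp = new CheckConstraintsResponse();
    resp.setCheckConstraints(new ArrayList<SQLCheckConstraint>());
    resp.validate();  // passes: the field is set, and an empty list is legal

    resp.addToCheckConstraints(new SQLCheckConstraint());  // creates the list lazily if needed
    CheckConstraintsResponse copy = resp.deepCopy();        // element-wise copy of the list
    // copy.getCheckConstraintsSize() == 1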

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java
new file mode 100644
index 0000000..b4a016a
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/CheckLockRequest.java
@@ -0,0 +1,589 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class CheckLockRequest implements org.apache.thrift.TBase<CheckLockRequest, CheckLockRequest._Fields>, java.io.Serializable, Cloneable, Comparable<CheckLockRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("CheckLockRequest");
+
+  private static final org.apache.thrift.protocol.TField LOCKID_FIELD_DESC = new org.apache.thrift.protocol.TField("lockid", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)2);
+  private static final org.apache.thrift.protocol.TField ELAPSED_MS_FIELD_DESC = new org.apache.thrift.protocol.TField("elapsed_ms", org.apache.thrift.protocol.TType.I64, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new CheckLockRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new CheckLockRequestTupleSchemeFactory());
+  }
+
+  private long lockid; // required
+  private long txnid; // optional
+  private long elapsed_ms; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    LOCKID((short)1, "lockid"),
+    TXNID((short)2, "txnid"),
+    ELAPSED_MS((short)3, "elapsed_ms");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // LOCKID
+          return LOCKID;
+        case 2: // TXNID
+          return TXNID;
+        case 3: // ELAPSED_MS
+          return ELAPSED_MS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __LOCKID_ISSET_ID = 0;
+  private static final int __TXNID_ISSET_ID = 1;
+  private static final int __ELAPSED_MS_ISSET_ID = 2;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.TXNID,_Fields.ELAPSED_MS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.LOCKID, new org.apache.thrift.meta_data.FieldMetaData("lockid", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.TXNID, new org.apache.thrift.meta_data.FieldMetaData("txnid", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.ELAPSED_MS, new org.apache.thrift.meta_data.FieldMetaData("elapsed_ms", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(CheckLockRequest.class, metaDataMap);
+  }
+
+  public CheckLockRequest() {
+  }
+
+  public CheckLockRequest(
+    long lockid)
+  {
+    this();
+    this.lockid = lockid;
+    setLockidIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public CheckLockRequest(CheckLockRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.lockid = other.lockid;
+    this.txnid = other.txnid;
+    this.elapsed_ms = other.elapsed_ms;
+  }
+
+  public CheckLockRequest deepCopy() {
+    return new CheckLockRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    setLockidIsSet(false);
+    this.lockid = 0;
+    setTxnidIsSet(false);
+    this.txnid = 0;
+    setElapsed_msIsSet(false);
+    this.elapsed_ms = 0;
+  }
+
+  public long getLockid() {
+    return this.lockid;
+  }
+
+  public void setLockid(long lockid) {
+    this.lockid = lockid;
+    setLockidIsSet(true);
+  }
+
+  public void unsetLockid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LOCKID_ISSET_ID);
+  }
+
+  /** Returns true if field lockid is set (has been assigned a value) and false otherwise */
+  public boolean isSetLockid() {
+    return EncodingUtils.testBit(__isset_bitfield, __LOCKID_ISSET_ID);
+  }
+
+  public void setLockidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LOCKID_ISSET_ID, value);
+  }
+
+  public long getTxnid() {
+    return this.txnid;
+  }
+
+  public void setTxnid(long txnid) {
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+  }
+
+  public void unsetTxnid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  /** Returns true if field txnid is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnid() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  public void setTxnidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+  }
+
+  public long getElapsed_ms() {
+    return this.elapsed_ms;
+  }
+
+  public void setElapsed_ms(long elapsed_ms) {
+    this.elapsed_ms = elapsed_ms;
+    setElapsed_msIsSet(true);
+  }
+
+  public void unsetElapsed_ms() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ELAPSED_MS_ISSET_ID);
+  }
+
+  /** Returns true if field elapsed_ms is set (has been assigned a value) and false otherwise */
+  public boolean isSetElapsed_ms() {
+    return EncodingUtils.testBit(__isset_bitfield, __ELAPSED_MS_ISSET_ID);
+  }
+
+  public void setElapsed_msIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ELAPSED_MS_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case LOCKID:
+      if (value == null) {
+        unsetLockid();
+      } else {
+        setLockid((Long)value);
+      }
+      break;
+
+    case TXNID:
+      if (value == null) {
+        unsetTxnid();
+      } else {
+        setTxnid((Long)value);
+      }
+      break;
+
+    case ELAPSED_MS:
+      if (value == null) {
+        unsetElapsed_ms();
+      } else {
+        setElapsed_ms((Long)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case LOCKID:
+      return getLockid();
+
+    case TXNID:
+      return getTxnid();
+
+    case ELAPSED_MS:
+      return getElapsed_ms();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case LOCKID:
+      return isSetLockid();
+    case TXNID:
+      return isSetTxnid();
+    case ELAPSED_MS:
+      return isSetElapsed_ms();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof CheckLockRequest)
+      return this.equals((CheckLockRequest)that);
+    return false;
+  }
+
+  public boolean equals(CheckLockRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_lockid = true;
+    boolean that_present_lockid = true;
+    if (this_present_lockid || that_present_lockid) {
+      if (!(this_present_lockid && that_present_lockid))
+        return false;
+      if (this.lockid != that.lockid)
+        return false;
+    }
+
+    boolean this_present_txnid = true && this.isSetTxnid();
+    boolean that_present_txnid = true && that.isSetTxnid();
+    if (this_present_txnid || that_present_txnid) {
+      if (!(this_present_txnid && that_present_txnid))
+        return false;
+      if (this.txnid != that.txnid)
+        return false;
+    }
+
+    boolean this_present_elapsed_ms = true && this.isSetElapsed_ms();
+    boolean that_present_elapsed_ms = true && that.isSetElapsed_ms();
+    if (this_present_elapsed_ms || that_present_elapsed_ms) {
+      if (!(this_present_elapsed_ms && that_present_elapsed_ms))
+        return false;
+      if (this.elapsed_ms != that.elapsed_ms)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_lockid = true;
+    list.add(present_lockid);
+    if (present_lockid)
+      list.add(lockid);
+
+    boolean present_txnid = true && (isSetTxnid());
+    list.add(present_txnid);
+    if (present_txnid)
+      list.add(txnid);
+
+    boolean present_elapsed_ms = true && (isSetElapsed_ms());
+    list.add(present_elapsed_ms);
+    if (present_elapsed_ms)
+      list.add(elapsed_ms);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(CheckLockRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetLockid()).compareTo(other.isSetLockid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLockid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lockid, other.lockid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTxnid()).compareTo(other.isSetTxnid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnid, other.txnid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetElapsed_ms()).compareTo(other.isSetElapsed_ms());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetElapsed_ms()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.elapsed_ms, other.elapsed_ms);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("CheckLockRequest(");
+    boolean first = true;
+
+    sb.append("lockid:");
+    sb.append(this.lockid);
+    first = false;
+    if (isSetTxnid()) {
+      if (!first) sb.append(", ");
+      sb.append("txnid:");
+      sb.append(this.txnid);
+      first = false;
+    }
+    if (isSetElapsed_ms()) {
+      if (!first) sb.append(", ");
+      sb.append("elapsed_ms:");
+      sb.append(this.elapsed_ms);
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetLockid()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'lockid' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization bypasses the default constructor, so the isset bitfield must be reset explicitly before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class CheckLockRequestStandardSchemeFactory implements SchemeFactory {
+    public CheckLockRequestStandardScheme getScheme() {
+      return new CheckLockRequestStandardScheme();
+    }
+  }
+
+  private static class CheckLockRequestStandardScheme extends StandardScheme<CheckLockRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, CheckLockRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // LOCKID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.lockid = iprot.readI64();
+              struct.setLockidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TXNID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txnid = iprot.readI64();
+              struct.setTxnidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // ELAPSED_MS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.elapsed_ms = iprot.readI64();
+              struct.setElapsed_msIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, CheckLockRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(LOCKID_FIELD_DESC);
+      oprot.writeI64(struct.lockid);
+      oprot.writeFieldEnd();
+      if (struct.isSetTxnid()) {
+        oprot.writeFieldBegin(TXNID_FIELD_DESC);
+        oprot.writeI64(struct.txnid);
+        oprot.writeFieldEnd();
+      }
+      if (struct.isSetElapsed_ms()) {
+        oprot.writeFieldBegin(ELAPSED_MS_FIELD_DESC);
+        oprot.writeI64(struct.elapsed_ms);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class CheckLockRequestTupleSchemeFactory implements SchemeFactory {
+    public CheckLockRequestTupleScheme getScheme() {
+      return new CheckLockRequestTupleScheme();
+    }
+  }
+
+  private static class CheckLockRequestTupleScheme extends TupleScheme<CheckLockRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, CheckLockRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.lockid);
+      BitSet optionals = new BitSet();
+      if (struct.isSetTxnid()) {
+        optionals.set(0);
+      }
+      if (struct.isSetElapsed_ms()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetTxnid()) {
+        oprot.writeI64(struct.txnid);
+      }
+      if (struct.isSetElapsed_ms()) {
+        oprot.writeI64(struct.elapsed_ms);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, CheckLockRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.lockid = iprot.readI64();
+      struct.setLockidIsSet(true);
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.txnid = iprot.readI64();
+        struct.setTxnidIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.elapsed_ms = iprot.readI64();
+        struct.setElapsed_msIsSet(true);
+      }
+    }
+  }
+
+}
+
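
The optional txnid and elapsed_ms fields above are serialized only when explicitly set; the tuple scheme records which optionals are present in a leading BitSet. A minimal round-trip sketch, assuming the usual generated setters and libthrift's TMemoryBuffer (neither shown in this diff):

    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;
    import org.apache.hadoop.hive.metastore.api.CheckLockRequest;

    public class CheckLockRoundTrip {
      public static void main(String[] args) throws Exception {
        CheckLockRequest req = new CheckLockRequest();
        req.setLockid(42L);  // required field; validate() fails without it
        req.setTxnid(7L);    // optional; written only because it is set
        // elapsed_ms stays unset and is skipped on the wire

        TMemoryBuffer buf = new TMemoryBuffer(64);  // in-memory transport from libthrift
        req.write(new TCompactProtocol(buf));

        CheckLockRequest copy = new CheckLockRequest();
        copy.read(new TCompactProtocol(buf));
        System.out.println(copy.isSetTxnid());       // true
        System.out.println(copy.isSetElapsed_ms());  // false
      }
    }
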

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
new file mode 100644
index 0000000..1af1628
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java
@@ -0,0 +1,438 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ClearFileMetadataRequest implements org.apache.thrift.TBase<ClearFileMetadataRequest, ClearFileMetadataRequest._Fields>, java.io.Serializable, Cloneable, Comparable<ClearFileMetadataRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClearFileMetadataRequest");
+
+  private static final org.apache.thrift.protocol.TField FILE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("fileIds", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ClearFileMetadataRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ClearFileMetadataRequestTupleSchemeFactory());
+  }
+
+  private List<Long> fileIds; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FILE_IDS((short)1, "fileIds");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FILE_IDS
+          return FILE_IDS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FILE_IDS, new org.apache.thrift.meta_data.FieldMetaData("fileIds", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ClearFileMetadataRequest.class, metaDataMap);
+  }
+
+  public ClearFileMetadataRequest() {
+  }
+
+  public ClearFileMetadataRequest(
+    List<Long> fileIds)
+  {
+    this();
+    this.fileIds = fileIds;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ClearFileMetadataRequest(ClearFileMetadataRequest other) {
+    if (other.isSetFileIds()) {
+      List<Long> __this__fileIds = new ArrayList<Long>(other.fileIds);
+      this.fileIds = __this__fileIds;
+    }
+  }
+
+  public ClearFileMetadataRequest deepCopy() {
+    return new ClearFileMetadataRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.fileIds = null;
+  }
+
+  public int getFileIdsSize() {
+    return (this.fileIds == null) ? 0 : this.fileIds.size();
+  }
+
+  public java.util.Iterator<Long> getFileIdsIterator() {
+    return (this.fileIds == null) ? null : this.fileIds.iterator();
+  }
+
+  public void addToFileIds(long elem) {
+    if (this.fileIds == null) {
+      this.fileIds = new ArrayList<Long>();
+    }
+    this.fileIds.add(elem);
+  }
+
+  public List<Long> getFileIds() {
+    return this.fileIds;
+  }
+
+  public void setFileIds(List<Long> fileIds) {
+    this.fileIds = fileIds;
+  }
+
+  public void unsetFileIds() {
+    this.fileIds = null;
+  }
+
+  /** Returns true if field fileIds is set (has been assigned a value) and false otherwise */
+  public boolean isSetFileIds() {
+    return this.fileIds != null;
+  }
+
+  public void setFileIdsIsSet(boolean value) {
+    if (!value) {
+      this.fileIds = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FILE_IDS:
+      if (value == null) {
+        unsetFileIds();
+      } else {
+        setFileIds((List<Long>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FILE_IDS:
+      return getFileIds();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FILE_IDS:
+      return isSetFileIds();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ClearFileMetadataRequest)
+      return this.equals((ClearFileMetadataRequest)that);
+    return false;
+  }
+
+  public boolean equals(ClearFileMetadataRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_fileIds = true && this.isSetFileIds();
+    boolean that_present_fileIds = true && that.isSetFileIds();
+    if (this_present_fileIds || that_present_fileIds) {
+      if (!(this_present_fileIds && that_present_fileIds))
+        return false;
+      if (!this.fileIds.equals(that.fileIds))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_fileIds = true && (isSetFileIds());
+    list.add(present_fileIds);
+    if (present_fileIds)
+      list.add(fileIds);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ClearFileMetadataRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetFileIds()).compareTo(other.isSetFileIds());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFileIds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fileIds, other.fileIds);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ClearFileMetadataRequest(");
+    boolean first = true;
+
+    sb.append("fileIds:");
+    if (this.fileIds == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fileIds);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetFileIds()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'fileIds' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ClearFileMetadataRequestStandardSchemeFactory implements SchemeFactory {
+    public ClearFileMetadataRequestStandardScheme getScheme() {
+      return new ClearFileMetadataRequestStandardScheme();
+    }
+  }
+
+  private static class ClearFileMetadataRequestStandardScheme extends StandardScheme<ClearFileMetadataRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FILE_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list824 = iprot.readListBegin();
+                struct.fileIds = new ArrayList<Long>(_list824.size);
+                long _elem825;
+                for (int _i826 = 0; _i826 < _list824.size; ++_i826)
+                {
+                  _elem825 = iprot.readI64();
+                  struct.fileIds.add(_elem825);
+                }
+                iprot.readListEnd();
+              }
+              struct.setFileIdsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.fileIds != null) {
+        oprot.writeFieldBegin(FILE_IDS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size()));
+          for (long _iter827 : struct.fileIds)
+          {
+            oprot.writeI64(_iter827);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ClearFileMetadataRequestTupleSchemeFactory implements SchemeFactory {
+    public ClearFileMetadataRequestTupleScheme getScheme() {
+      return new ClearFileMetadataRequestTupleScheme();
+    }
+  }
+
+  private static class ClearFileMetadataRequestTupleScheme extends TupleScheme<ClearFileMetadataRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.fileIds.size());
+        for (long _iter828 : struct.fileIds)
+        {
+          oprot.writeI64(_iter828);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list829 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.fileIds = new ArrayList<Long>(_list829.size);
+        long _elem830;
+        for (int _i831 = 0; _i831 < _list829.size; ++_i831)
+        {
+          _elem830 = iprot.readI64();
+          struct.fileIds.add(_elem830);
+        }
+      }
+      struct.setFileIdsIsSet(true);
+    }
+  }
+
+}
+
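
ClearFileMetadataRequest has a single required field, fileIds, and validate() rejects an instance where it was never assigned. A short usage sketch built only from the helpers shown above:

    import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest;

    public class ClearFileMetadataUsage {
      public static void main(String[] args) throws Exception {
        ClearFileMetadataRequest req = new ClearFileMetadataRequest();
        req.addToFileIds(1001L);  // lazily creates the backing ArrayList
        req.addToFileIds(1002L);
        req.validate();           // passes: fileIds is now set

        // new ClearFileMetadataRequest().validate() would throw
        // TProtocolException("Required field 'fileIds' is unset! ...")
      }
    }
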

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataResult.java
new file mode 100644
index 0000000..cef255f
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataResult.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ClearFileMetadataResult implements org.apache.thrift.TBase<ClearFileMetadataResult, ClearFileMetadataResult._Fields>, java.io.Serializable, Cloneable, Comparable<ClearFileMetadataResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ClearFileMetadataResult");
+
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ClearFileMetadataResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ClearFileMetadataResultTupleSchemeFactory());
+  }
+
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ClearFileMetadataResult.class, metaDataMap);
+  }
+
+  public ClearFileMetadataResult() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ClearFileMetadataResult(ClearFileMetadataResult other) {
+  }
+
+  public ClearFileMetadataResult deepCopy() {
+    return new ClearFileMetadataResult(this);
+  }
+
+  @Override
+  public void clear() {
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ClearFileMetadataResult)
+      return this.equals((ClearFileMetadataResult)that);
+    return false;
+  }
+
+  public boolean equals(ClearFileMetadataResult that) {
+    if (that == null)
+      return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ClearFileMetadataResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ClearFileMetadataResult(");
+    boolean first = true;
+
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ClearFileMetadataResultStandardSchemeFactory implements SchemeFactory {
+    public ClearFileMetadataResultStandardScheme getScheme() {
+      return new ClearFileMetadataResultStandardScheme();
+    }
+  }
+
+  private static class ClearFileMetadataResultStandardScheme extends StandardScheme<ClearFileMetadataResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ClearFileMetadataResultTupleSchemeFactory implements SchemeFactory {
+    public ClearFileMetadataResultTupleScheme getScheme() {
+      return new ClearFileMetadataResultTupleScheme();
+    }
+  }
+
+  private static class ClearFileMetadataResultTupleScheme extends TupleScheme<ClearFileMetadataResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+    }
+  }
+
+}
+
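
ClearFileMetadataResult carries no fields at all: the standard scheme writes only a struct header and a field stop, every instance compares equal, and hashCode() hashes an empty list. A small illustration (not part of the patch):

    import org.apache.hadoop.hive.metastore.api.ClearFileMetadataResult;

    public class EmptyResultDemo {
      public static void main(String[] args) {
        ClearFileMetadataResult a = new ClearFileMetadataResult();
        ClearFileMetadataResult b = new ClearFileMetadataResult();
        System.out.println(a.equals(b));                   // true: no fields to differ on
        System.out.println(a.hashCode() == b.hashCode());  // true: both hash an empty list
      }
    }
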


[84/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
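
The combined diff below (diff --cc, produced by the merge) extends the generated Table bean with two new optional fields: writeId, defaulting to -1, and isStatsCompliant. Because the constructor assigns the default without flipping the isset bit, a freshly built Table reports the default value but not the flag. A sketch of the expected behavior, assuming the accessor names the Thrift compiler conventionally emits for these fields (getWriteId, setWriteId, isSetWriteId):

    import org.apache.hadoop.hive.metastore.api.Table;

    public class TableWriteIdDemo {
      public static void main(String[] args) {
        Table t = new Table();
        System.out.println(t.getWriteId());    // -1: constructor default
        System.out.println(t.isSetWriteId());  // false: bit is set only by the setter
        t.setWriteId(10L);                     // assumed generated setter
        System.out.println(t.isSetWriteId());  // true
      }
    }
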
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
index 0000000,38d4f64..f9d48c8
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java
@@@ -1,0 -1,2283 +1,2483 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Table implements org.apache.thrift.TBase<Table, Table._Fields>, java.io.Serializable, Cloneable, Comparable<Table> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Table");
+ 
+   private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)1);
+   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)2);
+   private static final org.apache.thrift.protocol.TField OWNER_FIELD_DESC = new org.apache.thrift.protocol.TField("owner", org.apache.thrift.protocol.TType.STRING, (short)3);
+   private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)4);
+   private static final org.apache.thrift.protocol.TField LAST_ACCESS_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("lastAccessTime", org.apache.thrift.protocol.TType.I32, (short)5);
+   private static final org.apache.thrift.protocol.TField RETENTION_FIELD_DESC = new org.apache.thrift.protocol.TField("retention", org.apache.thrift.protocol.TType.I32, (short)6);
+   private static final org.apache.thrift.protocol.TField SD_FIELD_DESC = new org.apache.thrift.protocol.TField("sd", org.apache.thrift.protocol.TType.STRUCT, (short)7);
+   private static final org.apache.thrift.protocol.TField PARTITION_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionKeys", org.apache.thrift.protocol.TType.LIST, (short)8);
+   private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)9);
+   private static final org.apache.thrift.protocol.TField VIEW_ORIGINAL_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewOriginalText", org.apache.thrift.protocol.TType.STRING, (short)10);
+   private static final org.apache.thrift.protocol.TField VIEW_EXPANDED_TEXT_FIELD_DESC = new org.apache.thrift.protocol.TField("viewExpandedText", org.apache.thrift.protocol.TType.STRING, (short)11);
+   private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)12);
+   private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13);
+   private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14);
+   private static final org.apache.thrift.protocol.TField REWRITE_ENABLED_FIELD_DESC = new org.apache.thrift.protocol.TField("rewriteEnabled", org.apache.thrift.protocol.TType.BOOL, (short)15);
+   private static final org.apache.thrift.protocol.TField CREATION_METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("creationMetadata", org.apache.thrift.protocol.TType.STRUCT, (short)16);
+   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)17);
+   private static final org.apache.thrift.protocol.TField OWNER_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerType", org.apache.thrift.protocol.TType.I32, (short)18);
++  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)19);
++  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)20);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new TableStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new TableTupleSchemeFactory());
+   }
+ 
+   private String tableName; // required
+   private String dbName; // required
+   private String owner; // required
+   private int createTime; // required
+   private int lastAccessTime; // required
+   private int retention; // required
+   private StorageDescriptor sd; // required
+   private List<FieldSchema> partitionKeys; // required
+   private Map<String,String> parameters; // required
+   private String viewOriginalText; // required
+   private String viewExpandedText; // required
+   private String tableType; // required
+   private PrincipalPrivilegeSet privileges; // optional
+   private boolean temporary; // optional
+   private boolean rewriteEnabled; // optional
+   private CreationMetadata creationMetadata; // optional
+   private String catName; // optional
+   private PrincipalType ownerType; // optional
++  private long writeId; // optional
++  private boolean isStatsCompliant; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     TABLE_NAME((short)1, "tableName"),
+     DB_NAME((short)2, "dbName"),
+     OWNER((short)3, "owner"),
+     CREATE_TIME((short)4, "createTime"),
+     LAST_ACCESS_TIME((short)5, "lastAccessTime"),
+     RETENTION((short)6, "retention"),
+     SD((short)7, "sd"),
+     PARTITION_KEYS((short)8, "partitionKeys"),
+     PARAMETERS((short)9, "parameters"),
+     VIEW_ORIGINAL_TEXT((short)10, "viewOriginalText"),
+     VIEW_EXPANDED_TEXT((short)11, "viewExpandedText"),
+     TABLE_TYPE((short)12, "tableType"),
+     PRIVILEGES((short)13, "privileges"),
+     TEMPORARY((short)14, "temporary"),
+     REWRITE_ENABLED((short)15, "rewriteEnabled"),
+     CREATION_METADATA((short)16, "creationMetadata"),
+     CAT_NAME((short)17, "catName"),
+     /**
+      * 
+      * @see PrincipalType
+      */
 -    OWNER_TYPE((short)18, "ownerType");
++    OWNER_TYPE((short)18, "ownerType"),
++    WRITE_ID((short)19, "writeId"),
++    IS_STATS_COMPLIANT((short)20, "isStatsCompliant");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // TABLE_NAME
+           return TABLE_NAME;
+         case 2: // DB_NAME
+           return DB_NAME;
+         case 3: // OWNER
+           return OWNER;
+         case 4: // CREATE_TIME
+           return CREATE_TIME;
+         case 5: // LAST_ACCESS_TIME
+           return LAST_ACCESS_TIME;
+         case 6: // RETENTION
+           return RETENTION;
+         case 7: // SD
+           return SD;
+         case 8: // PARTITION_KEYS
+           return PARTITION_KEYS;
+         case 9: // PARAMETERS
+           return PARAMETERS;
+         case 10: // VIEW_ORIGINAL_TEXT
+           return VIEW_ORIGINAL_TEXT;
+         case 11: // VIEW_EXPANDED_TEXT
+           return VIEW_EXPANDED_TEXT;
+         case 12: // TABLE_TYPE
+           return TABLE_TYPE;
+         case 13: // PRIVILEGES
+           return PRIVILEGES;
+         case 14: // TEMPORARY
+           return TEMPORARY;
+         case 15: // REWRITE_ENABLED
+           return REWRITE_ENABLED;
+         case 16: // CREATION_METADATA
+           return CREATION_METADATA;
+         case 17: // CAT_NAME
+           return CAT_NAME;
+         case 18: // OWNER_TYPE
+           return OWNER_TYPE;
++        case 19: // WRITE_ID
++          return WRITE_ID;
++        case 20: // IS_STATS_COMPLIANT
++          return IS_STATS_COMPLIANT;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
+   private static final int __CREATETIME_ISSET_ID = 0;
+   private static final int __LASTACCESSTIME_ISSET_ID = 1;
+   private static final int __RETENTION_ISSET_ID = 2;
+   private static final int __TEMPORARY_ISSET_ID = 3;
+   private static final int __REWRITEENABLED_ISSET_ID = 4;
++  private static final int __WRITEID_ISSET_ID = 5;
++  private static final int __ISSTATSCOMPLIANT_ISSET_ID = 6;
+   private byte __isset_bitfield = 0;
 -  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE};
++  private static final _Fields optionals[] = {_Fields.PRIVILEGES,_Fields.TEMPORARY,_Fields.REWRITE_ENABLED,_Fields.CREATION_METADATA,_Fields.CAT_NAME,_Fields.OWNER_TYPE,_Fields.WRITE_ID,_Fields.IS_STATS_COMPLIANT};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.OWNER, new org.apache.thrift.meta_data.FieldMetaData("owner", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+     tmpMap.put(_Fields.LAST_ACCESS_TIME, new org.apache.thrift.meta_data.FieldMetaData("lastAccessTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+     tmpMap.put(_Fields.RETENTION, new org.apache.thrift.meta_data.FieldMetaData("retention", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+     tmpMap.put(_Fields.SD, new org.apache.thrift.meta_data.FieldMetaData("sd", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, StorageDescriptor.class)));
+     tmpMap.put(_Fields.PARTITION_KEYS, new org.apache.thrift.meta_data.FieldMetaData("partitionKeys", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class))));
+     tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+     tmpMap.put(_Fields.VIEW_ORIGINAL_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewOriginalText", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.VIEW_EXPANDED_TEXT, new org.apache.thrift.meta_data.FieldMetaData("viewExpandedText", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.TABLE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("tableType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
+     tmpMap.put(_Fields.TEMPORARY, new org.apache.thrift.meta_data.FieldMetaData("temporary", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     tmpMap.put(_Fields.REWRITE_ENABLED, new org.apache.thrift.meta_data.FieldMetaData("rewriteEnabled", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     tmpMap.put(_Fields.CREATION_METADATA, new org.apache.thrift.meta_data.FieldMetaData("creationMetadata", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT        , "CreationMetadata")));
+     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.OWNER_TYPE, new org.apache.thrift.meta_data.FieldMetaData("ownerType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
++    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap);
+   }
+ 
+   public Table() {
+     this.temporary = false;
+ 
+     this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER;
+ 
++    this.writeId = -1L;
++
+   }
+ 
+   public Table(
+     String tableName,
+     String dbName,
+     String owner,
+     int createTime,
+     int lastAccessTime,
+     int retention,
+     StorageDescriptor sd,
+     List<FieldSchema> partitionKeys,
+     Map<String,String> parameters,
+     String viewOriginalText,
+     String viewExpandedText,
+     String tableType)
+   {
+     this();
+     this.tableName = tableName;
+     this.dbName = dbName;
+     this.owner = owner;
+     this.createTime = createTime;
+     setCreateTimeIsSet(true);
+     this.lastAccessTime = lastAccessTime;
+     setLastAccessTimeIsSet(true);
+     this.retention = retention;
+     setRetentionIsSet(true);
+     this.sd = sd;
+     this.partitionKeys = partitionKeys;
+     this.parameters = parameters;
+     this.viewOriginalText = viewOriginalText;
+     this.viewExpandedText = viewExpandedText;
+     this.tableType = tableType;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public Table(Table other) {
+     __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetTableName()) {
+       this.tableName = other.tableName;
+     }
+     if (other.isSetDbName()) {
+       this.dbName = other.dbName;
+     }
+     if (other.isSetOwner()) {
+       this.owner = other.owner;
+     }
+     this.createTime = other.createTime;
+     this.lastAccessTime = other.lastAccessTime;
+     this.retention = other.retention;
+     if (other.isSetSd()) {
+       this.sd = new StorageDescriptor(other.sd);
+     }
+     if (other.isSetPartitionKeys()) {
+       List<FieldSchema> __this__partitionKeys = new ArrayList<FieldSchema>(other.partitionKeys.size());
+       for (FieldSchema other_element : other.partitionKeys) {
+         __this__partitionKeys.add(new FieldSchema(other_element));
+       }
+       this.partitionKeys = __this__partitionKeys;
+     }
+     if (other.isSetParameters()) {
+       Map<String,String> __this__parameters = new HashMap<String,String>(other.parameters);
+       this.parameters = __this__parameters;
+     }
+     if (other.isSetViewOriginalText()) {
+       this.viewOriginalText = other.viewOriginalText;
+     }
+     if (other.isSetViewExpandedText()) {
+       this.viewExpandedText = other.viewExpandedText;
+     }
+     if (other.isSetTableType()) {
+       this.tableType = other.tableType;
+     }
+     if (other.isSetPrivileges()) {
+       this.privileges = new PrincipalPrivilegeSet(other.privileges);
+     }
+     this.temporary = other.temporary;
+     this.rewriteEnabled = other.rewriteEnabled;
+     if (other.isSetCreationMetadata()) {
+       this.creationMetadata = other.creationMetadata;
+     }
+     if (other.isSetCatName()) {
+       this.catName = other.catName;
+     }
+     if (other.isSetOwnerType()) {
+       this.ownerType = other.ownerType;
+     }
++    this.writeId = other.writeId;
++    this.isStatsCompliant = other.isStatsCompliant;
+   }
+ 
+   public Table deepCopy() {
+     return new Table(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.tableName = null;
+     this.dbName = null;
+     this.owner = null;
+     setCreateTimeIsSet(false);
+     this.createTime = 0;
+     setLastAccessTimeIsSet(false);
+     this.lastAccessTime = 0;
+     setRetentionIsSet(false);
+     this.retention = 0;
+     this.sd = null;
+     this.partitionKeys = null;
+     this.parameters = null;
+     this.viewOriginalText = null;
+     this.viewExpandedText = null;
+     this.tableType = null;
+     this.privileges = null;
+     this.temporary = false;
+ 
+     setRewriteEnabledIsSet(false);
+     this.rewriteEnabled = false;
+     this.creationMetadata = null;
+     this.catName = null;
+     this.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.USER;
+ 
++    this.writeId = -1L;
++
++    setIsStatsCompliantIsSet(false);
++    this.isStatsCompliant = false;
+   }
+ 
+   public String getTableName() {
+     return this.tableName;
+   }
+ 
+   public void setTableName(String tableName) {
+     this.tableName = tableName;
+   }
+ 
+   public void unsetTableName() {
+     this.tableName = null;
+   }
+ 
+   /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+   public boolean isSetTableName() {
+     return this.tableName != null;
+   }
+ 
+   public void setTableNameIsSet(boolean value) {
+     if (!value) {
+       this.tableName = null;
+     }
+   }
+ 
+   public String getDbName() {
+     return this.dbName;
+   }
+ 
+   public void setDbName(String dbName) {
+     this.dbName = dbName;
+   }
+ 
+   public void unsetDbName() {
+     this.dbName = null;
+   }
+ 
+   /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+   public boolean isSetDbName() {
+     return this.dbName != null;
+   }
+ 
+   public void setDbNameIsSet(boolean value) {
+     if (!value) {
+       this.dbName = null;
+     }
+   }
+ 
+   public String getOwner() {
+     return this.owner;
+   }
+ 
+   public void setOwner(String owner) {
+     this.owner = owner;
+   }
+ 
+   public void unsetOwner() {
+     this.owner = null;
+   }
+ 
+   /** Returns true if field owner is set (has been assigned a value) and false otherwise */
+   public boolean isSetOwner() {
+     return this.owner != null;
+   }
+ 
+   public void setOwnerIsSet(boolean value) {
+     if (!value) {
+       this.owner = null;
+     }
+   }
+ 
+   public int getCreateTime() {
+     return this.createTime;
+   }
+ 
+   public void setCreateTime(int createTime) {
+     this.createTime = createTime;
+     setCreateTimeIsSet(true);
+   }
+ 
+   public void unsetCreateTime() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+   }
+ 
+   /** Returns true if field createTime is set (has been assigned a value) and false otherwise */
+   public boolean isSetCreateTime() {
+     return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+   }
+ 
+   public void setCreateTimeIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
+   }
+ 
+   public int getLastAccessTime() {
+     return this.lastAccessTime;
+   }
+ 
+   public void setLastAccessTime(int lastAccessTime) {
+     this.lastAccessTime = lastAccessTime;
+     setLastAccessTimeIsSet(true);
+   }
+ 
+   public void unsetLastAccessTime() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID);
+   }
+ 
+   /** Returns true if field lastAccessTime is set (has been assigned a value) and false otherwise */
+   public boolean isSetLastAccessTime() {
+     return EncodingUtils.testBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID);
+   }
+ 
+   public void setLastAccessTimeIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID, value);
+   }
+ 
+   public int getRetention() {
+     return this.retention;
+   }
+ 
+   public void setRetention(int retention) {
+     this.retention = retention;
+     setRetentionIsSet(true);
+   }
+ 
+   public void unsetRetention() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __RETENTION_ISSET_ID);
+   }
+ 
+   /** Returns true if field retention is set (has been assigned a value) and false otherwise */
+   public boolean isSetRetention() {
+     return EncodingUtils.testBit(__isset_bitfield, __RETENTION_ISSET_ID);
+   }
+ 
+   public void setRetentionIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __RETENTION_ISSET_ID, value);
+   }
+ 
+   public StorageDescriptor getSd() {
+     return this.sd;
+   }
+ 
+   public void setSd(StorageDescriptor sd) {
+     this.sd = sd;
+   }
+ 
+   public void unsetSd() {
+     this.sd = null;
+   }
+ 
+   /** Returns true if field sd is set (has been assigned a value) and false otherwise */
+   public boolean isSetSd() {
+     return this.sd != null;
+   }
+ 
+   public void setSdIsSet(boolean value) {
+     if (!value) {
+       this.sd = null;
+     }
+   }
+ 
+   public int getPartitionKeysSize() {
+     return (this.partitionKeys == null) ? 0 : this.partitionKeys.size();
+   }
+ 
+   public java.util.Iterator<FieldSchema> getPartitionKeysIterator() {
+     return (this.partitionKeys == null) ? null : this.partitionKeys.iterator();
+   }
+ 
+   public void addToPartitionKeys(FieldSchema elem) {
+     if (this.partitionKeys == null) {
+       this.partitionKeys = new ArrayList<FieldSchema>();
+     }
+     this.partitionKeys.add(elem);
+   }
+ 
+   public List<FieldSchema> getPartitionKeys() {
+     return this.partitionKeys;
+   }
+ 
+   public void setPartitionKeys(List<FieldSchema> partitionKeys) {
+     this.partitionKeys = partitionKeys;
+   }
+ 
+   public void unsetPartitionKeys() {
+     this.partitionKeys = null;
+   }
+ 
+   /** Returns true if field partitionKeys is set (has been assigned a value) and false otherwise */
+   public boolean isSetPartitionKeys() {
+     return this.partitionKeys != null;
+   }
+ 
+   public void setPartitionKeysIsSet(boolean value) {
+     if (!value) {
+       this.partitionKeys = null;
+     }
+   }
+ 
+   public int getParametersSize() {
+     return (this.parameters == null) ? 0 : this.parameters.size();
+   }
+ 
+   public void putToParameters(String key, String val) {
+     if (this.parameters == null) {
+       this.parameters = new HashMap<String,String>();
+     }
+     this.parameters.put(key, val);
+   }
+ 
+   public Map<String,String> getParameters() {
+     return this.parameters;
+   }
+ 
+   public void setParameters(Map<String,String> parameters) {
+     this.parameters = parameters;
+   }
+ 
+   public void unsetParameters() {
+     this.parameters = null;
+   }
+ 
+   /** Returns true if field parameters is set (has been assigned a value) and false otherwise */
+   public boolean isSetParameters() {
+     return this.parameters != null;
+   }
+ 
+   public void setParametersIsSet(boolean value) {
+     if (!value) {
+       this.parameters = null;
+     }
+   }
+ 
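The add/put helpers above allocate their containers lazily, so a freshly constructed Table can be populated incrementally without calling the bulk setters first. A brief usage sketch (it assumes the generated FieldSchema(name, type, comment) constructor):

    Table t = new Table();
    t.addToPartitionKeys(new FieldSchema("ds", "string", "partition date")); // list allocated here
    t.putToParameters("transactional", "true");                              // map allocated here
    int n = t.getPartitionKeysSize(); // 1; the size helpers treat a null container as empty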
+   public String getViewOriginalText() {
+     return this.viewOriginalText;
+   }
+ 
+   public void setViewOriginalText(String viewOriginalText) {
+     this.viewOriginalText = viewOriginalText;
+   }
+ 
+   public void unsetViewOriginalText() {
+     this.viewOriginalText = null;
+   }
+ 
+   /** Returns true if field viewOriginalText is set (has been assigned a value) and false otherwise */
+   public boolean isSetViewOriginalText() {
+     return this.viewOriginalText != null;
+   }
+ 
+   public void setViewOriginalTextIsSet(boolean value) {
+     if (!value) {
+       this.viewOriginalText = null;
+     }
+   }
+ 
+   public String getViewExpandedText() {
+     return this.viewExpandedText;
+   }
+ 
+   public void setViewExpandedText(String viewExpandedText) {
+     this.viewExpandedText = viewExpandedText;
+   }
+ 
+   public void unsetViewExpandedText() {
+     this.viewExpandedText = null;
+   }
+ 
+   /** Returns true if field viewExpandedText is set (has been assigned a value) and false otherwise */
+   public boolean isSetViewExpandedText() {
+     return this.viewExpandedText != null;
+   }
+ 
+   public void setViewExpandedTextIsSet(boolean value) {
+     if (!value) {
+       this.viewExpandedText = null;
+     }
+   }
+ 
+   public String getTableType() {
+     return this.tableType;
+   }
+ 
+   public void setTableType(String tableType) {
+     this.tableType = tableType;
+   }
+ 
+   public void unsetTableType() {
+     this.tableType = null;
+   }
+ 
+   /** Returns true if field tableType is set (has been assigned a value) and false otherwise */
+   public boolean isSetTableType() {
+     return this.tableType != null;
+   }
+ 
+   public void setTableTypeIsSet(boolean value) {
+     if (!value) {
+       this.tableType = null;
+     }
+   }
+ 
+   public PrincipalPrivilegeSet getPrivileges() {
+     return this.privileges;
+   }
+ 
+   public void setPrivileges(PrincipalPrivilegeSet privileges) {
+     this.privileges = privileges;
+   }
+ 
+   public void unsetPrivileges() {
+     this.privileges = null;
+   }
+ 
+   /** Returns true if field privileges is set (has been assigned a value) and false otherwise */
+   public boolean isSetPrivileges() {
+     return this.privileges != null;
+   }
+ 
+   public void setPrivilegesIsSet(boolean value) {
+     if (!value) {
+       this.privileges = null;
+     }
+   }
+ 
+   public boolean isTemporary() {
+     return this.temporary;
+   }
+ 
+   public void setTemporary(boolean temporary) {
+     this.temporary = temporary;
+     setTemporaryIsSet(true);
+   }
+ 
+   public void unsetTemporary() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TEMPORARY_ISSET_ID);
+   }
+ 
+   /** Returns true if field temporary is set (has been assigned a value) and false otherwise */
+   public boolean isSetTemporary() {
+     return EncodingUtils.testBit(__isset_bitfield, __TEMPORARY_ISSET_ID);
+   }
+ 
+   public void setTemporaryIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TEMPORARY_ISSET_ID, value);
+   }
+ 
+   public boolean isRewriteEnabled() {
+     return this.rewriteEnabled;
+   }
+ 
+   public void setRewriteEnabled(boolean rewriteEnabled) {
+     this.rewriteEnabled = rewriteEnabled;
+     setRewriteEnabledIsSet(true);
+   }
+ 
+   public void unsetRewriteEnabled() {
+     __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID);
+   }
+ 
+   /** Returns true if field rewriteEnabled is set (has been assigned a value) and false otherwise */
+   public boolean isSetRewriteEnabled() {
+     return EncodingUtils.testBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID);
+   }
+ 
+   public void setRewriteEnabledIsSet(boolean value) {
+     __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REWRITEENABLED_ISSET_ID, value);
+   }
+ 
+   public CreationMetadata getCreationMetadata() {
+     return this.creationMetadata;
+   }
+ 
+   public void setCreationMetadata(CreationMetadata creationMetadata) {
+     this.creationMetadata = creationMetadata;
+   }
+ 
+   public void unsetCreationMetadata() {
+     this.creationMetadata = null;
+   }
+ 
+   /** Returns true if field creationMetadata is set (has been assigned a value) and false otherwise */
+   public boolean isSetCreationMetadata() {
+     return this.creationMetadata != null;
+   }
+ 
+   public void setCreationMetadataIsSet(boolean value) {
+     if (!value) {
+       this.creationMetadata = null;
+     }
+   }
+ 
+   public String getCatName() {
+     return this.catName;
+   }
+ 
+   public void setCatName(String catName) {
+     this.catName = catName;
+   }
+ 
+   public void unsetCatName() {
+     this.catName = null;
+   }
+ 
+   /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+   public boolean isSetCatName() {
+     return this.catName != null;
+   }
+ 
+   public void setCatNameIsSet(boolean value) {
+     if (!value) {
+       this.catName = null;
+     }
+   }
+ 
+   /**
+    * @see PrincipalType
+    */
+   public PrincipalType getOwnerType() {
+     return this.ownerType;
+   }
+ 
+   /**
+    * @see PrincipalType
+    */
+   public void setOwnerType(PrincipalType ownerType) {
+     this.ownerType = ownerType;
+   }
+ 
+   public void unsetOwnerType() {
+     this.ownerType = null;
+   }
+ 
+   /** Returns true if field ownerType is set (has been assigned a value) and false otherwise */
+   public boolean isSetOwnerType() {
+     return this.ownerType != null;
+   }
+ 
+   public void setOwnerTypeIsSet(boolean value) {
+     if (!value) {
+       this.ownerType = null;
+     }
+   }
+ 
++  public long getWriteId() {
++    return this.writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++    setWriteIdIsSet(true);
++  }
++
++  public void unsetWriteId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
++  public boolean isSetWriteId() {
++    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  public void setWriteIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
++  }
++
++  public boolean isIsStatsCompliant() {
++    return this.isStatsCompliant;
++  }
++
++  public void setIsStatsCompliant(boolean isStatsCompliant) {
++    this.isStatsCompliant = isStatsCompliant;
++    setIsStatsCompliantIsSet(true);
++  }
++
++  public void unsetIsStatsCompliant() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
++  public boolean isSetIsStatsCompliant() {
++    return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  public void setIsStatsCompliantIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case TABLE_NAME:
+       if (value == null) {
+         unsetTableName();
+       } else {
+         setTableName((String)value);
+       }
+       break;
+ 
+     case DB_NAME:
+       if (value == null) {
+         unsetDbName();
+       } else {
+         setDbName((String)value);
+       }
+       break;
+ 
+     case OWNER:
+       if (value == null) {
+         unsetOwner();
+       } else {
+         setOwner((String)value);
+       }
+       break;
+ 
+     case CREATE_TIME:
+       if (value == null) {
+         unsetCreateTime();
+       } else {
+         setCreateTime((Integer)value);
+       }
+       break;
+ 
+     case LAST_ACCESS_TIME:
+       if (value == null) {
+         unsetLastAccessTime();
+       } else {
+         setLastAccessTime((Integer)value);
+       }
+       break;
+ 
+     case RETENTION:
+       if (value == null) {
+         unsetRetention();
+       } else {
+         setRetention((Integer)value);
+       }
+       break;
+ 
+     case SD:
+       if (value == null) {
+         unsetSd();
+       } else {
+         setSd((StorageDescriptor)value);
+       }
+       break;
+ 
+     case PARTITION_KEYS:
+       if (value == null) {
+         unsetPartitionKeys();
+       } else {
+         setPartitionKeys((List<FieldSchema>)value);
+       }
+       break;
+ 
+     case PARAMETERS:
+       if (value == null) {
+         unsetParameters();
+       } else {
+         setParameters((Map<String,String>)value);
+       }
+       break;
+ 
+     case VIEW_ORIGINAL_TEXT:
+       if (value == null) {
+         unsetViewOriginalText();
+       } else {
+         setViewOriginalText((String)value);
+       }
+       break;
+ 
+     case VIEW_EXPANDED_TEXT:
+       if (value == null) {
+         unsetViewExpandedText();
+       } else {
+         setViewExpandedText((String)value);
+       }
+       break;
+ 
+     case TABLE_TYPE:
+       if (value == null) {
+         unsetTableType();
+       } else {
+         setTableType((String)value);
+       }
+       break;
+ 
+     case PRIVILEGES:
+       if (value == null) {
+         unsetPrivileges();
+       } else {
+         setPrivileges((PrincipalPrivilegeSet)value);
+       }
+       break;
+ 
+     case TEMPORARY:
+       if (value == null) {
+         unsetTemporary();
+       } else {
+         setTemporary((Boolean)value);
+       }
+       break;
+ 
+     case REWRITE_ENABLED:
+       if (value == null) {
+         unsetRewriteEnabled();
+       } else {
+         setRewriteEnabled((Boolean)value);
+       }
+       break;
+ 
+     case CREATION_METADATA:
+       if (value == null) {
+         unsetCreationMetadata();
+       } else {
+         setCreationMetadata((CreationMetadata)value);
+       }
+       break;
+ 
+     case CAT_NAME:
+       if (value == null) {
+         unsetCatName();
+       } else {
+         setCatName((String)value);
+       }
+       break;
+ 
+     case OWNER_TYPE:
+       if (value == null) {
+         unsetOwnerType();
+       } else {
+         setOwnerType((PrincipalType)value);
+       }
+       break;
+ 
++    case WRITE_ID:
++      if (value == null) {
++        unsetWriteId();
++      } else {
++        setWriteId((Long)value);
++      }
++      break;
++
++    case IS_STATS_COMPLIANT:
++      if (value == null) {
++        unsetIsStatsCompliant();
++      } else {
++        setIsStatsCompliant((Boolean)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case TABLE_NAME:
+       return getTableName();
+ 
+     case DB_NAME:
+       return getDbName();
+ 
+     case OWNER:
+       return getOwner();
+ 
+     case CREATE_TIME:
+       return getCreateTime();
+ 
+     case LAST_ACCESS_TIME:
+       return getLastAccessTime();
+ 
+     case RETENTION:
+       return getRetention();
+ 
+     case SD:
+       return getSd();
+ 
+     case PARTITION_KEYS:
+       return getPartitionKeys();
+ 
+     case PARAMETERS:
+       return getParameters();
+ 
+     case VIEW_ORIGINAL_TEXT:
+       return getViewOriginalText();
+ 
+     case VIEW_EXPANDED_TEXT:
+       return getViewExpandedText();
+ 
+     case TABLE_TYPE:
+       return getTableType();
+ 
+     case PRIVILEGES:
+       return getPrivileges();
+ 
+     case TEMPORARY:
+       return isTemporary();
+ 
+     case REWRITE_ENABLED:
+       return isRewriteEnabled();
+ 
+     case CREATION_METADATA:
+       return getCreationMetadata();
+ 
+     case CAT_NAME:
+       return getCatName();
+ 
+     case OWNER_TYPE:
+       return getOwnerType();
+ 
++    case WRITE_ID:
++      return getWriteId();
++
++    case IS_STATS_COMPLIANT:
++      return isIsStatsCompliant();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
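setFieldValue/getFieldValue give generic, enum-keyed access to every member; primitives cross this boundary boxed, which is why the new writeId is cast to Long above and unboxed again by callers. A usage sketch:

    Table t = new Table();
    t.setFieldValue(Table._Fields.WRITE_ID, 10L);                  // autoboxes to Long, matching the cast
    long writeId = (Long) t.getFieldValue(Table._Fields.WRITE_ID); // unboxes the returned Object
    boolean present = t.isSet(Table._Fields.WRITE_ID);             // true after the assignment (isSet is below)
    t.setFieldValue(Table._Fields.WRITE_ID, null);                 // null routes to unsetWriteId()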
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case TABLE_NAME:
+       return isSetTableName();
+     case DB_NAME:
+       return isSetDbName();
+     case OWNER:
+       return isSetOwner();
+     case CREATE_TIME:
+       return isSetCreateTime();
+     case LAST_ACCESS_TIME:
+       return isSetLastAccessTime();
+     case RETENTION:
+       return isSetRetention();
+     case SD:
+       return isSetSd();
+     case PARTITION_KEYS:
+       return isSetPartitionKeys();
+     case PARAMETERS:
+       return isSetParameters();
+     case VIEW_ORIGINAL_TEXT:
+       return isSetViewOriginalText();
+     case VIEW_EXPANDED_TEXT:
+       return isSetViewExpandedText();
+     case TABLE_TYPE:
+       return isSetTableType();
+     case PRIVILEGES:
+       return isSetPrivileges();
+     case TEMPORARY:
+       return isSetTemporary();
+     case REWRITE_ENABLED:
+       return isSetRewriteEnabled();
+     case CREATION_METADATA:
+       return isSetCreationMetadata();
+     case CAT_NAME:
+       return isSetCatName();
+     case OWNER_TYPE:
+       return isSetOwnerType();
++    case WRITE_ID:
++      return isSetWriteId();
++    case IS_STATS_COMPLIANT:
++      return isSetIsStatsCompliant();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof Table)
+       return this.equals((Table)that);
+     return false;
+   }
+ 
+   public boolean equals(Table that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_tableName = true && this.isSetTableName();
+     boolean that_present_tableName = true && that.isSetTableName();
+     if (this_present_tableName || that_present_tableName) {
+       if (!(this_present_tableName && that_present_tableName))
+         return false;
+       if (!this.tableName.equals(that.tableName))
+         return false;
+     }
+ 
+     boolean this_present_dbName = true && this.isSetDbName();
+     boolean that_present_dbName = true && that.isSetDbName();
+     if (this_present_dbName || that_present_dbName) {
+       if (!(this_present_dbName && that_present_dbName))
+         return false;
+       if (!this.dbName.equals(that.dbName))
+         return false;
+     }
+ 
+     boolean this_present_owner = true && this.isSetOwner();
+     boolean that_present_owner = true && that.isSetOwner();
+     if (this_present_owner || that_present_owner) {
+       if (!(this_present_owner && that_present_owner))
+         return false;
+       if (!this.owner.equals(that.owner))
+         return false;
+     }
+ 
+     boolean this_present_createTime = true;
+     boolean that_present_createTime = true;
+     if (this_present_createTime || that_present_createTime) {
+       if (!(this_present_createTime && that_present_createTime))
+         return false;
+       if (this.createTime != that.createTime)
+         return false;
+     }
+ 
+     boolean this_present_lastAccessTime = true;
+     boolean that_present_lastAccessTime = true;
+     if (this_present_lastAccessTime || that_present_lastAccessTime) {
+       if (!(this_present_lastAccessTime && that_present_lastAccessTime))
+         return false;
+       if (this.lastAccessTime != that.lastAccessTime)
+         return false;
+     }
+ 
+     boolean this_present_retention = true;
+     boolean that_present_retention = true;
+     if (this_present_retention || that_present_retention) {
+       if (!(this_present_retention && that_present_retention))
+         return false;
+       if (this.retention != that.retention)
+         return false;
+     }
+ 
+     boolean this_present_sd = true && this.isSetSd();
+     boolean that_present_sd = true && that.isSetSd();
+     if (this_present_sd || that_present_sd) {
+       if (!(this_present_sd && that_present_sd))
+         return false;
+       if (!this.sd.equals(that.sd))
+         return false;
+     }
+ 
+     boolean this_present_partitionKeys = true && this.isSetPartitionKeys();
+     boolean that_present_partitionKeys = true && that.isSetPartitionKeys();
+     if (this_present_partitionKeys || that_present_partitionKeys) {
+       if (!(this_present_partitionKeys && that_present_partitionKeys))
+         return false;
+       if (!this.partitionKeys.equals(that.partitionKeys))
+         return false;
+     }
+ 
+     boolean this_present_parameters = true && this.isSetParameters();
+     boolean that_present_parameters = true && that.isSetParameters();
+     if (this_present_parameters || that_present_parameters) {
+       if (!(this_present_parameters && that_present_parameters))
+         return false;
+       if (!this.parameters.equals(that.parameters))
+         return false;
+     }
+ 
+     boolean this_present_viewOriginalText = true && this.isSetViewOriginalText();
+     boolean that_present_viewOriginalText = true && that.isSetViewOriginalText();
+     if (this_present_viewOriginalText || that_present_viewOriginalText) {
+       if (!(this_present_viewOriginalText && that_present_viewOriginalText))
+         return false;
+       if (!this.viewOriginalText.equals(that.viewOriginalText))
+         return false;
+     }
+ 
+     boolean this_present_viewExpandedText = true && this.isSetViewExpandedText();
+     boolean that_present_viewExpandedText = true && that.isSetViewExpandedText();
+     if (this_present_viewExpandedText || that_present_viewExpandedText) {
+       if (!(this_present_viewExpandedText && that_present_viewExpandedText))
+         return false;
+       if (!this.viewExpandedText.equals(that.viewExpandedText))
+         return false;
+     }
+ 
+     boolean this_present_tableType = true && this.isSetTableType();
+     boolean that_present_tableType = true && that.isSetTableType();
+     if (this_present_tableType || that_present_tableType) {
+       if (!(this_present_tableType && that_present_tableType))
+         return false;
+       if (!this.tableType.equals(that.tableType))
+         return false;
+     }
+ 
+     boolean this_present_privileges = true && this.isSetPrivileges();
+     boolean that_present_privileges = true && that.isSetPrivileges();
+     if (this_present_privileges || that_present_privileges) {
+       if (!(this_present_privileges && that_present_privileges))
+         return false;
+       if (!this.privileges.equals(that.privileges))
+         return false;
+     }
+ 
+     boolean this_present_temporary = true && this.isSetTemporary();
+     boolean that_present_temporary = true && that.isSetTemporary();
+     if (this_present_temporary || that_present_temporary) {
+       if (!(this_present_temporary && that_present_temporary))
+         return false;
+       if (this.temporary != that.temporary)
+         return false;
+     }
+ 
+     boolean this_present_rewriteEnabled = true && this.isSetRewriteEnabled();
+     boolean that_present_rewriteEnabled = true && that.isSetRewriteEnabled();
+     if (this_present_rewriteEnabled || that_present_rewriteEnabled) {
+       if (!(this_present_rewriteEnabled && that_present_rewriteEnabled))
+         return false;
+       if (this.rewriteEnabled != that.rewriteEnabled)
+         return false;
+     }
+ 
+     boolean this_present_creationMetadata = true && this.isSetCreationMetadata();
+     boolean that_present_creationMetadata = true && that.isSetCreationMetadata();
+     if (this_present_creationMetadata || that_present_creationMetadata) {
+       if (!(this_present_creationMetadata && that_present_creationMetadata))
+         return false;
+       if (!this.creationMetadata.equals(that.creationMetadata))
+         return false;
+     }
+ 
+     boolean this_present_catName = true && this.isSetCatName();
+     boolean that_present_catName = true && that.isSetCatName();
+     if (this_present_catName || that_present_catName) {
+       if (!(this_present_catName && that_present_catName))
+         return false;
+       if (!this.catName.equals(that.catName))
+         return false;
+     }
+ 
+     boolean this_present_ownerType = true && this.isSetOwnerType();
+     boolean that_present_ownerType = true && that.isSetOwnerType();
+     if (this_present_ownerType || that_present_ownerType) {
+       if (!(this_present_ownerType && that_present_ownerType))
+         return false;
+       if (!this.ownerType.equals(that.ownerType))
+         return false;
+     }
+ 
++    boolean this_present_writeId = true && this.isSetWriteId();
++    boolean that_present_writeId = true && that.isSetWriteId();
++    if (this_present_writeId || that_present_writeId) {
++      if (!(this_present_writeId && that_present_writeId))
++        return false;
++      if (this.writeId != that.writeId)
++        return false;
++    }
++
++    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
++    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
++    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
++      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
++        return false;
++      if (this.isStatsCompliant != that.isStatsCompliant)
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_tableName = true && (isSetTableName());
+     list.add(present_tableName);
+     if (present_tableName)
+       list.add(tableName);
+ 
+     boolean present_dbName = true && (isSetDbName());
+     list.add(present_dbName);
+     if (present_dbName)
+       list.add(dbName);
+ 
+     boolean present_owner = true && (isSetOwner());
+     list.add(present_owner);
+     if (present_owner)
+       list.add(owner);
+ 
+     boolean present_createTime = true;
+     list.add(present_createTime);
+     if (present_createTime)
+       list.add(createTime);
+ 
+     boolean present_lastAccessTime = true;
+     list.add(present_lastAccessTime);
+     if (present_lastAccessTime)
+       list.add(lastAccessTime);
+ 
+     boolean present_retention = true;
+     list.add(present_retention);
+     if (present_retention)
+       list.add(retention);
+ 
+     boolean present_sd = true && (isSetSd());
+     list.add(present_sd);
+     if (present_sd)
+       list.add(sd);
+ 
+     boolean present_partitionKeys = true && (isSetPartitionKeys());
+     list.add(present_partitionKeys);
+     if (present_partitionKeys)
+       list.add(partitionKeys);
+ 
+     boolean present_parameters = true && (isSetParameters());
+     list.add(present_parameters);
+     if (present_parameters)
+       list.add(parameters);
+ 
+     boolean present_viewOriginalText = true && (isSetViewOriginalText());
+     list.add(present_viewOriginalText);
+     if (present_viewOriginalText)
+       list.add(viewOriginalText);
+ 
+     boolean present_viewExpandedText = true && (isSetViewExpandedText());
+     list.add(present_viewExpandedText);
+     if (present_viewExpandedText)
+       list.add(viewExpandedText);
+ 
+     boolean present_tableType = true && (isSetTableType());
+     list.add(present_tableType);
+     if (present_tableType)
+       list.add(tableType);
+ 
+     boolean present_privileges = true && (isSetPrivileges());
+     list.add(present_privileges);
+     if (present_privileges)
+       list.add(privileges);
+ 
+     boolean present_temporary = true && (isSetTemporary());
+     list.add(present_temporary);
+     if (present_temporary)
+       list.add(temporary);
+ 
+     boolean present_rewriteEnabled = true && (isSetRewriteEnabled());
+     list.add(present_rewriteEnabled);
+     if (present_rewriteEnabled)
+       list.add(rewriteEnabled);
+ 
+     boolean present_creationMetadata = true && (isSetCreationMetadata());
+     list.add(present_creationMetadata);
+     if (present_creationMetadata)
+       list.add(creationMetadata);
+ 
+     boolean present_catName = true && (isSetCatName());
+     list.add(present_catName);
+     if (present_catName)
+       list.add(catName);
+ 
+     boolean present_ownerType = true && (isSetOwnerType());
+     list.add(present_ownerType);
+     if (present_ownerType)
+       list.add(ownerType.getValue());
+ 
++    boolean present_writeId = true && (isSetWriteId());
++    list.add(present_writeId);
++    if (present_writeId)
++      list.add(writeId);
++
++    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
++    list.add(present_isStatsCompliant);
++    if (present_isStatsCompliant)
++      list.add(isStatsCompliant);
++
+     return list.hashCode();
+   }
+ 
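equals() and hashCode() stay mutually consistent because both fold the same present-flag/value pairs: hashCode() collects them into a List and delegates to List.hashCode(), so any field that can affect equality also affects the hash. An illustrative check (run with java -ea to enable asserts):

    Table a = new Table();
    Table b = new Table();
    a.setTableName("t1");
    b.setTableName("t1");
    assert a.equals(b) && a.hashCode() == b.hashCode(); // same fields set, same values

    b.setWriteId(5L);    // an optional is now present on b only
    assert !a.equals(b); // isSetWriteId() itself participates in equality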
+   @Override
+   public int compareTo(Table other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTableName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetDbName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetOwner()).compareTo(other.isSetOwner());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetOwner()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.owner, other.owner);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCreateTime()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetLastAccessTime()).compareTo(other.isSetLastAccessTime());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetLastAccessTime()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lastAccessTime, other.lastAccessTime);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetRetention()).compareTo(other.isSetRetention());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetRetention()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.retention, other.retention);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetSd()).compareTo(other.isSetSd());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetSd()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sd, other.sd);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetPartitionKeys()).compareTo(other.isSetPartitionKeys());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetPartitionKeys()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionKeys, other.partitionKeys);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetParameters()).compareTo(other.isSetParameters());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetParameters()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parameters, other.parameters);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetViewOriginalText()).compareTo(other.isSetViewOriginalText());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetViewOriginalText()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.viewOriginalText, other.viewOriginalText);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetViewExpandedText()).compareTo(other.isSetViewExpandedText());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetViewExpandedText()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.viewExpandedText, other.viewExpandedText);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetTableType()).compareTo(other.isSetTableType());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTableType()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableType, other.tableType);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetPrivileges()).compareTo(other.isSetPrivileges());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetPrivileges()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privileges, other.privileges);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetTemporary()).compareTo(other.isSetTemporary());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTemporary()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.temporary, other.temporary);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetRewriteEnabled()).compareTo(other.isSetRewriteEnabled());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetRewriteEnabled()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rewriteEnabled, other.rewriteEnabled);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCreationMetadata()).compareTo(other.isSetCreationMetadata());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCreationMetadata()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.creationMetadata, other.creationMetadata);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCatName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetOwnerType()).compareTo(other.isSetOwnerType());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetOwnerType()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ownerType, other.ownerType);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetWriteId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIsStatsCompliant()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
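compareTo() walks the fields in Thrift-id order, comparing the isset flags before the values each time, so an unset field sorts ahead of a set one and the appended writeId/isStatsCompliant only break ties after all eighteen older fields. One rung of that ladder, extracted as a standalone helper for clarity (illustrative, not generated code):

    static int compareByWriteId(Table a, Table b) {
      int cmp = Boolean.valueOf(a.isSetWriteId()).compareTo(b.isSetWriteId()); // unset < set
      if (cmp != 0) return cmp;
      if (!a.isSetWriteId()) return 0; // neither side has the field: tie at this rung
      return org.apache.thrift.TBaseHelper.compareTo(a.getWriteId(), b.getWriteId());
    }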
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("Table(");
+     boolean first = true;
+ 
+     sb.append("tableName:");
+     if (this.tableName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.tableName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("dbName:");
+     if (this.dbName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.dbName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("owner:");
+     if (this.owner == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.owner);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("createTime:");
+     sb.append(this.createTime);
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("lastAccessTime:");
+     sb.append(this.lastAccessTime);
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("retention:");
+     sb.append(this.retention);
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("sd:");
+     if (this.sd == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.sd);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("partitionKeys:");
+     if (this.partitionKeys == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.partitionKeys);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("parameters:");
+     if (this.parameters == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.parameters);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("viewOriginalText:");
+     if (this.viewOriginalText == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.viewOriginalText);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("viewExpandedText:");
+     if (this.viewExpandedText == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.viewExpandedText);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("tableType:");
+     if (this.tableType == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.tableType);
+     }
+     first = false;
+     if (isSetPrivileges()) {
+       if (!first) sb.append(", ");
+       sb.append("privileges:");
+       if (this.privileges == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.privileges);
+       }
+       first = false;
+     }
+     if (isSetTemporary()) {
+       if (!first) sb.append(", ");
+       sb.append("temporary:");
+       sb.append(this.temporary);
+       first = false;
+     }
+     if (isSetRewriteEnabled()) {
+       if (!first) sb.append(", ");
+       sb.append("rewriteEnabled:");
+       sb.append(this.rewriteEnabled);
+       first = false;
+     }
+     if (isSetCreationMetadata()) {
+       if (!first) sb.append(", ");
+       sb.append("creationMetadata:");
+       if (this.creationMetadata == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.creationMetadata);
+       }
+       first = false;
+     }
+     if (isSetCatName()) {
+       if (!first) sb.append(", ");
+       sb.append("catName:");
+       if (this.catName == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.catName);
+       }
+       first = false;
+     }
+     if (isSetOwnerType()) {
+       if (!first) sb.append(", ");
+       sb.append("ownerType:");
+       if (this.ownerType == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.ownerType);
+       }
+       first = false;
+     }
++    if (isSetWriteId()) {
++      if (!first) sb.append(", ");
++      sb.append("writeId:");
++      sb.append(this.writeId);
++      first = false;
++    }
++    if (isSetIsStatsCompliant()) {
++      if (!first) sb.append(", ");
++      sb.append("isStatsCompliant:");
++      sb.append(this.isStatsCompliant);
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
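toString() prints the first twelve fields unconditionally (rendering absent objects as null) and gates the optional tail, including the two new fields, behind isSet checks. For a Table with only tableName and writeId assigned, the result is a single line roughly like (wrapped here for readability):

    Table(tableName:t1, dbName:null, owner:null, createTime:0, lastAccessTime:0, retention:0,
          sd:null, partitionKeys:null, parameters:null, viewOriginalText:null,
          viewExpandedText:null, tableType:null, writeId:10)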
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     // check for sub-struct validity
+     if (sd != null) {
+       sd.validate();
+     }
+     if (privileges != null) {
+       privileges.validate();
+     }
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
+       // Java serialization bypasses the default constructor, so the isset bitfield must be reset explicitly before reading.
+       __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
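writeObject/readObject above make Java serialization a thin wrapper around TCompactProtocol, with the bitfield reset guarding against Java's skip of the default constructor. The same bytes can be produced directly with Thrift's helper classes; a sketch against libthrift 0.9.3 (the version named in this file's header), where `table` stands for any populated Table:

    import org.apache.thrift.TSerializer;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    TSerializer serializer = new TSerializer(new TCompactProtocol.Factory());
    byte[] bytes = serializer.serialize(table);   // the same wire format writeObject embeds

    Table copy = new Table();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);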
+   private static class TableStandardSchemeFactory implements SchemeFactory {
+     public TableStandardScheme getScheme() {
+       return new TableStandardScheme();
+     }
+   }
+ 
+   private static class TableStandardScheme extends StandardScheme<Table> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // TABLE_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.tableName = iprot.readString();
+               struct.setTableNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // DB_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.dbName = iprot.readString();
+               struct.setDbNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 3: // OWNER
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.owner = iprot.readString();
+               struct.setOwnerIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 4: // CREATE_TIME
+             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+               struct.createTime = iprot.readI32();
+               struct.setCreateTimeIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 5: // LAST_ACCESS_TIME
+             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+               struct.lastAccessTime = iprot.readI32();
+               struct.setLastAccessTimeIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 6: // RETENTION
+             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+               struct.retention = iprot.readI32();
+               struct.setRetentionIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 7: // SD
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.sd = new StorageDescriptor();
+               struct.sd.read(iprot);
+               struct.setSdIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 8: // PARTITION_KEYS
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list198 = iprot.readListBegin();
+                 struct.partitionKeys = new ArrayList<FieldSchema>(_list198.size);
+                 FieldSchema _elem199;
+                 for (int _i200 = 0; _i200 < _list198.size; ++_i200)
+                 {
+                   _elem199 = new FieldSchema();
+                   _elem199.read(iprot);
+                   struct.partitionKeys.add(_elem199);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setPartitionKeysIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 9: // PARAMETERS
+             if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+               {
+                 org.apache.thrift.protocol.TMap _map201 = iprot.readMapBegin();
+                 struct.parameters = new HashMap<String,String>(2*_map201.size);
+                 String _key202;
+                 String _val203;
+                 for (int _i204 = 0; _i204 < _map201.size; ++_i204)
+                 {
+                   _key202 = iprot.readString();
+                   _val203 = iprot.readString();
+                   struct.parameters.put(_key202, _val203);
+                 }
+                 iprot.readMapEnd();
+               }
+               struct.setParametersIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 10: // VIEW_ORIGINAL_TEXT
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.viewOriginalText = iprot.readString();
+               struct.setViewOriginalTextIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 11: // VIEW_EXPANDED_TEXT
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.viewExpandedText = iprot.readString();
+               struct.setViewExpandedTextIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 12: // TABLE_TYPE
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.tableType = iprot.readString();
+               struct.setTableTypeIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 13: // PRIVILEGES
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.privileges = new PrincipalPrivilegeSet();
+               struct.privileges.read(iprot);
+               struct.setPrivilegesIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 14: // TEMPORARY
+             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+               struct.temporary = iprot.readBool();
+               struct.setTemporaryIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 15: // REWRITE_ENABLED
+             if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+               struct.rewriteEnabled = iprot.readBool();
+               struct.setRewriteEnabledIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 16: // CREATION_METADATA
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.creationMetadata = new CreationMetadata();
+               struct.creationMetadata.read(iprot);
+               struct.setCreationMetadataIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 17: // CAT_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.catName = iprot.readString();
+               struct.setCatNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 18: // OWNER_TYPE
+             if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+               struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+               struct.setOwnerTypeIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 19: // WRITE_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.writeId = iprot.readI64();
++              struct.setWriteIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 20: // IS_STATS_COMPLIANT
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.isStatsCompliant = iprot.readBool();
++              struct.setIsStatsCompliantIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.tableName != null) {
+         oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+         oprot.writeString(struct.tableName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.dbName != null) {
+         oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+         oprot.writeString(struct.dbName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.owner != null) {
+         oprot.writeFieldBegin(OWNER_FIELD_DESC);
+         oprot.writeString(struct.owner);
+         oprot.writeFieldEnd();
+       }
+       oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
+       oprot.writeI32(struct.createTime);
+       oprot.writeFieldEnd();
+       oprot.writeFieldBegin(LAST_ACCESS_TIME_FIELD_DESC);
+       oprot.writeI32(struct.lastAccessTime);
+       oprot.writeFieldEnd();
+       oprot.writeFieldBegin(RETENTION_FIELD_DESC);
+       oprot.writeI32(struct.retention);
+       oprot.writeFieldEnd();
+       if (struct.sd != null) {
+         oprot.writeFieldBegin(SD_FIELD_DESC);
+         struct.sd.write(oprot);
+         oprot.writeFieldEnd();
+       }
+       if (struct.partitionKeys != null) {
+         oprot.writeFieldBegin(PARTITION_KEYS_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionKeys.size()));
+           for (FieldSchema _iter205 : struct.partitionKeys)
+           {
+             _iter205.write(oprot);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       if (struct.parameters != null) {
+         oprot.writeFieldBegin(PARAMETERS_FIELD_DESC);
+         {
+           oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size()));
+           for (Map.Entry<String, String> _iter206 : struct.parameters.entrySet())
+           {
+             oprot.writeString(_iter206.getKey());
+             oprot.writeString(_iter206.getValue());
+           }
+           oprot.writeMapEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       if (struct.viewOriginalText != null) {
+         oprot.writeFieldBegin(VIEW_ORIGINAL_TEXT_FIELD_DESC);
+         oprot.writeString(struct.viewOriginalText);
+         oprot.writeFieldEnd();
+       }
+       if (struct.viewExpandedText != null) {
+         oprot.writeFieldBegin(VIEW_EXPANDED_TEXT_FIELD_DESC);
+         oprot.writeString(struct.viewExpandedText);
+         oprot.writeFieldEnd();
+       }
+       if (struct.tableType != null) {
+         oprot.writeFieldBegin(TABLE_TYPE_FIELD_DESC);
+         oprot.writeString(struct.tableType);
+         oprot.writeFieldEnd();
+       }
+       if (struct.privileges != null) {
+         if (struct.isSetPrivileges()) {
+           oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC);
+           struct.privileges.write(oprot);
+           oprot.writeFieldEnd();
+         }
+       }
+       if (struct.isSetTemporary()) {
+         oprot.writeFieldBegin(TEMPORARY_FIELD_DESC);
+         oprot.writeBool(struct.temporary);
+         oprot.writeFieldEnd();
+       }
+       if (struct.isSetRewriteEnabled()) {
+         oprot.writeFieldBegin(REWRITE_ENABLED_FIELD_DESC);
+         oprot.writeBool(struct.rewriteEnabled);
+         oprot.writeFieldEnd();
+       }
+       if (struct.creationMetadata != null) {
+         if (struct.isSetCreationMetadata()) {
+           oprot.writeFieldBegin(CREATION_METADATA_FIELD_DESC);
+           struct.creationMetadata.write(oprot);
+           oprot.writeFieldEnd();
+         }
+       }
+       if (struct.catName != null) {
+         if (struct.isSetCatName()) {
+           oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+           oprot.writeString(struct.catName);
+           oprot.writeFieldEnd();
+         }
+       }
+       if (struct.ownerType != null) {
+         if (struct.isSetOwnerType()) {
+           oprot.writeFieldBegin(OWNER_TYPE_FIELD_DESC);
+           oprot.writeI32(struct.ownerType.getValue());
+           oprot.writeFieldEnd();
+         }
+       }
++      if (struct.isSetWriteId()) {
++        oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
++        oprot.writeI64(struct.writeId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
++        oprot.writeBool(struct.isStatsCompliant);
++        oprot.writeFieldEnd();
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
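In the standard scheme every field is written tagged with its id and type, which is what keeps the two appended fields wire-compatible in both directions: an older reader that reaches ids 19 or 20 finds no matching case, falls into the default branch, and skips them, while a newer reader never receives them from an older writer because unset optionals are not emitted at all. The skip is the stock utility call used throughout read():

    // Any unrecognized field id lands here: consume and discard its bytes, then move on.
    org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);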
+   private static class TableTupleSchemeFactory implements SchemeFactory {
+     public TableTupleScheme getScheme() {
+       return new TableTupleScheme();
+     }
+   }
+ 
+   private static class TableTupleScheme extends TupleScheme<Table> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       BitSet optionals = new BitSet();
+       if (struct.isSetTableName()) {
+         optionals.set(0);
+       }
+       if (struct.isSetDbName()) {
+         optionals.set(1);
+       }
+       if (struct.isSetOwner()) {
+         optionals.set(2);
+       }
+       if (struct.isSetCreateTime()) {
+         optionals.set(3);
+       }
+       if (struct.isSetLastAccessTime()) {
+         optionals.set(4);
+       }
+       if (struct.isSetRetention()) {
+         optionals.set(5);
+       }
+       if (struct.isSetSd()) {
+         optionals.set(6);
+       }
+       if (struct.isSetPartitionKeys()) {
+         optionals.set(7);
+       }
+       if (struct.isSetParameters()) {
+         optionals.set(8);
+       }
+       if (struct.isSetViewOriginalText()) {
+         optionals.set(9);
+       }
+       if (struct.isSetViewExpandedText()) {
+         optionals.set(10);
+       }
+       if (struct.isSetTableType()) {
+         optionals.set(11);
+       }
+       if (struct.isSetPrivileges()) {
+         optionals.set(12);
+       }
+       if (struct.isSetTemporary()) {
+         optionals.set(13);
+       }
+       if (struct.isSetRewriteEnabled()) {
+         optionals.set(14);
+       }
+       if (struct.isSetCreationMetadata()) {
+         optionals.set(15);
+       }
+       if (struct.isSetCatName()) {
+         optionals.set(16);
+       }
+       if (struct.isSetOwnerType()) {
+         optionals.set(17);
+       }
 -      oprot.writeBitSet(optionals, 18);
++      if (struct.isSetWriteId()) {
++        optionals.set(18);
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        optionals.set(19);
++      }
++      oprot.writeBitSet(optionals, 20);
+       if (struct.isSetTableName()) {
+         oprot.writeString(struct.tableName);
+       }
+       if (struct.isSetDbName()) {
+         oprot.writeString(struct.dbName);
+       }
+       if (struct.isSetOwner()) {
+         oprot.writeString(struct.owner);
+       }
+       if (struct.isSetCreateTime()) {
+         oprot.writeI32(struct.createTime);
+       }
+       if (struct.isSetLastAccessTime()) {
+         oprot.writeI32(struct.lastAccessTime);
+       }
+       if (struct.isSetRetention()) {
+         oprot.writeI32(struct.retention);
+       }
+       if (struct.isSetSd()) {
+         struct.sd.write(oprot);
+       }
+       if (struct.isSetPartitionKeys()) {
+         {
+           oprot.writeI32(struct.partitionKeys.size());
+           for (FieldSchema _iter207 : struct.partitionKeys)
+           {
+             _iter207.write(oprot);
+           }
+         }
+       }
+       if (struct.isSetParameters()) {
+         {
+           oprot.writeI32(struct.parameters.size());
+           for (Map.Entry<String, String> _iter208 : struct.parameters.entrySet())
+           {
+             oprot.writeString(_iter208.getKey());
+             oprot.writeString(_iter208.getValue());
+           }
+         }
+       }
+       if (struct.isSetViewOriginalText()) {
+         oprot.writeString(struct.viewOriginalText);
+       }
+       if (struct.isSetViewExpandedText()) {
+         oprot.writeString(struct.viewExpandedText);
+       }
+       if (struct.isSetTableType()) {
+         oprot.writeString(struct.tableType);
+       }
+       if (struct.isSetPrivileges()) {
+         struct.privileges.write(oprot);
+       }
+       if (struct.isSetTemporary()) {
+         oprot.writeBool(struct.temporary);
+       }
+       if (struct.isSetRewriteEnabled()) {
+         oprot.writeBool(struct.rewriteEnabled);
+       }
+       if (struct.isSetCreationMetadata()) {
+         struct.creationMetadata.write(oprot);
+       }
+       if (struct.isSetCatName()) {
+         oprot.writeString(struct.catName);
+       }
+       if (struct.isSetOwnerType()) {
+         oprot.writeI32(struct.ownerType.getValue());
+       }
++      if (struct.isSetWriteId()) {
++        oprot.writeI64(struct.writeId);
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeBool(struct.isStatsCompliant);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
 -      BitSet incoming = iprot.readBitSet(18);
++      BitSet incoming = iprot.readBitSet(20);
+       if (incoming.get(0)) {
+         struct.tableName = iprot.readString();
+         struct.setTableNameIsSet(true);
+       }
+       if (incoming.get(1)) {
+         struct.dbName = iprot.readString();
+         struct.setDbNameIsSet(true);
+       }
+       if (incoming.get(2)) {
+         struct.owner = iprot.readString();
+         struct.setOwnerIsSet(true);
+       }
+       if (incoming.get(3)) {
+         struct.createTime = iprot.readI32();
+         struct.setCreateTimeIsSet(true);
+       }
+       if (incoming.get(4)) {
+         struct.lastAccessTime = iprot.readI32();
+         struct.setLastAccessTimeIsSet(true);
+       }
+       if (incoming.get(5)) {
+         struct.retention = iprot.readI32();
+         struct.setRetentionIsSet(true);
+       }
+       if (incoming.get(6)) {
+         struct.sd = new StorageDescriptor();
+         struct.sd.read(iprot);
+         struct.setSdIsSet(true);
+       }
+       if (incoming.get(7)) {
+         {
+           org.apache.thrift.protocol.TList _list209 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+           struct.partitionKeys = new ArrayList<FieldSchema>(_list209.size);
+           FieldSchema _elem210;
+           for (int _i211 = 0; _i211 < _list209.size; ++_i211)
+           {
+             _elem210 = new FieldSchema();
+             _elem210.read(iprot);
+             struct.partitionKeys.add(_elem210);
+           }
+         }
+         struct.setPartitionKeysIsSet(true);
+       }
+       if (incoming.get(8)) {
+         {
+           org.apache.thrift.protocol.TMap _map212 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+           struct.parameters = new HashMap<String,String>(2*_map212.size);
+           String _key213;
+           String _val214;
+           for (int _i215 = 0; _i215 < _map212.size; ++_i215)
+           {
+             _key213 = iprot.readString();
+             _val214 = iprot.readString();
+             struct.parameters.put(_key213, _val214);
+           }
+         }
+         struct.setParametersIsSet(true);
+       }
+       if (incoming.get(9)) {
+         struct.viewOriginalText = iprot.readString();
+         struct.setViewOriginalTextIsSet(true);
+       }
+       if (incoming.get(10)) {
+         struct.viewExpandedText = iprot.readString();
+         struct.setViewExpandedTextIsSet(true);
+       }
+       if (incoming.get(11)) {
+         struct.tableType = iprot.readString();
+         struct.setTableTypeIsSet(true);
+       }
+       if (incoming.get(12)) {
+         struct.privileges = new PrincipalPrivilegeSet();
+         struct.privileges.read(iprot);
+         struct.setPrivilegesIsSet(true);
+       }
+       if (incoming.get(13)) {
+         struct.temporary = iprot.readBool();
+         struct.setTemporaryIsSet(true);
+       }
+       if (incoming.get(14)) {
+         struct.rewriteEnabled = iprot.readBool();
+         struct.setRewriteEnabledIsSet(true);
+       }
+       if (incoming.get(15)) {
+         struct.creationMetadata = new CreationMetadata();
+         struct.creationMetadata.read(iprot);
+         struct.setCreationMetadataIsSet(true);
+       }
+       if (incoming.get(16)) {
+         struct.catName = iprot.readString();
+         struct.setCatNameIsSet(true);
+       }
+       if (incoming.get(17)) {
+         struct.ownerType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+         struct.setOwnerTypeIsSet(true);
+       }
++      if (incoming.get(18)) {
++        struct.writeId = iprot.readI64();
++        struct.setWriteIdIsSet(true);
++      }
++      if (incoming.get(19)) {
++        struct.isStatsCompliant = iprot.readBool();
++        struct.setIsStatsCompliantIsSet(true);
++      }

<TRUNCATED>
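Side note, not part of the patch: the doubly-prefixed "++" lines in the hunk above come from the master-txnstats merge. They append two optional fields, writeId (bitset slot 18) and isStatsCompliant (slot 19), to Table's tuple encoding and widen the optionals bitset from 18 to 20 slots on both the write and read paths; writer and reader must agree on that width or every later field shifts. A minimal, self-contained sketch of that bitset handshake using the Thrift 0.9.3 runtime directly (the class name TupleBitSetDemo and the literal values are illustrative only):

import java.util.BitSet;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class TupleBitSetDemo {
  public static void main(String[] args) throws Exception {
    TMemoryBuffer buf = new TMemoryBuffer(64);
    TTupleProtocol out = new TTupleProtocol(buf);

    BitSet optionals = new BitSet();
    optionals.set(18);               // slot 18: writeId is present
    optionals.set(19);               // slot 19: isStatsCompliant is present
    out.writeBitSet(optionals, 20);  // width must match the reader's readBitSet(20)
    out.writeI64(42L);               // writeId value
    out.writeBool(true);             // isStatsCompliant value

    TTupleProtocol in = new TTupleProtocol(buf);
    BitSet incoming = in.readBitSet(20);
    if (incoming.get(18)) {
      System.out.println("writeId = " + in.readI64());
    }
    if (incoming.get(19)) {
      System.out.println("isStatsCompliant = " + in.readBool());
    }
  }
}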

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
new file mode 100644
index 0000000..3ce72e9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
@@ -0,0 +1,959 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddDynamicPartitions implements org.apache.thrift.TBase<AddDynamicPartitions, AddDynamicPartitions._Fields>, java.io.Serializable, Cloneable, Comparable<AddDynamicPartitions> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddDynamicPartitions");
+
+  private static final org.apache.thrift.protocol.TField TXNID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnid", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField WRITEID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeid", org.apache.thrift.protocol.TType.I64, (short)2);
+  private static final org.apache.thrift.protocol.TField DBNAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbname", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField TABLENAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tablename", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField PARTITIONNAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionnames", org.apache.thrift.protocol.TType.LIST, (short)5);
+  private static final org.apache.thrift.protocol.TField OPERATION_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationType", org.apache.thrift.protocol.TType.I32, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AddDynamicPartitionsStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AddDynamicPartitionsTupleSchemeFactory());
+  }
+
+  private long txnid; // required
+  private long writeid; // required
+  private String dbname; // required
+  private String tablename; // required
+  private List<String> partitionnames; // required
+  private DataOperationType operationType; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TXNID((short)1, "txnid"),
+    WRITEID((short)2, "writeid"),
+    DBNAME((short)3, "dbname"),
+    TABLENAME((short)4, "tablename"),
+    PARTITIONNAMES((short)5, "partitionnames"),
+    /**
+     * 
+     * @see DataOperationType
+     */
+    OPERATION_TYPE((short)6, "operationType");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TXNID
+          return TXNID;
+        case 2: // WRITEID
+          return WRITEID;
+        case 3: // DBNAME
+          return DBNAME;
+        case 4: // TABLENAME
+          return TABLENAME;
+        case 5: // PARTITIONNAMES
+          return PARTITIONNAMES;
+        case 6: // OPERATION_TYPE
+          return OPERATION_TYPE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TXNID_ISSET_ID = 0;
+  private static final int __WRITEID_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.OPERATION_TYPE};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TXNID, new org.apache.thrift.meta_data.FieldMetaData("txnid", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.WRITEID, new org.apache.thrift.meta_data.FieldMetaData("writeid", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.DBNAME, new org.apache.thrift.meta_data.FieldMetaData("dbname", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLENAME, new org.apache.thrift.meta_data.FieldMetaData("tablename", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARTITIONNAMES, new org.apache.thrift.meta_data.FieldMetaData("partitionnames", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.OPERATION_TYPE, new org.apache.thrift.meta_data.FieldMetaData("operationType", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, DataOperationType.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddDynamicPartitions.class, metaDataMap);
+  }
+
+  public AddDynamicPartitions() {
+    this.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.UNSET;
+
+  }
+
+  public AddDynamicPartitions(
+    long txnid,
+    long writeid,
+    String dbname,
+    String tablename,
+    List<String> partitionnames)
+  {
+    this();
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+    this.writeid = writeid;
+    setWriteidIsSet(true);
+    this.dbname = dbname;
+    this.tablename = tablename;
+    this.partitionnames = partitionnames;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AddDynamicPartitions(AddDynamicPartitions other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.txnid = other.txnid;
+    this.writeid = other.writeid;
+    if (other.isSetDbname()) {
+      this.dbname = other.dbname;
+    }
+    if (other.isSetTablename()) {
+      this.tablename = other.tablename;
+    }
+    if (other.isSetPartitionnames()) {
+      List<String> __this__partitionnames = new ArrayList<String>(other.partitionnames);
+      this.partitionnames = __this__partitionnames;
+    }
+    if (other.isSetOperationType()) {
+      this.operationType = other.operationType;
+    }
+  }
+
+  public AddDynamicPartitions deepCopy() {
+    return new AddDynamicPartitions(this);
+  }
+
+  @Override
+  public void clear() {
+    setTxnidIsSet(false);
+    this.txnid = 0;
+    setWriteidIsSet(false);
+    this.writeid = 0;
+    this.dbname = null;
+    this.tablename = null;
+    this.partitionnames = null;
+    this.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.UNSET;
+
+  }
+
+  public long getTxnid() {
+    return this.txnid;
+  }
+
+  public void setTxnid(long txnid) {
+    this.txnid = txnid;
+    setTxnidIsSet(true);
+  }
+
+  public void unsetTxnid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  /** Returns true if field txnid is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnid() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
+  }
+
+  public void setTxnidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
+  }
+
+  public long getWriteid() {
+    return this.writeid;
+  }
+
+  public void setWriteid(long writeid) {
+    this.writeid = writeid;
+    setWriteidIsSet(true);
+  }
+
+  public void unsetWriteid() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
+  }
+
+  /** Returns true if field writeid is set (has been assigned a value) and false otherwise */
+  public boolean isSetWriteid() {
+    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
+  }
+
+  public void setWriteidIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
+  }
+
+  public String getDbname() {
+    return this.dbname;
+  }
+
+  public void setDbname(String dbname) {
+    this.dbname = dbname;
+  }
+
+  public void unsetDbname() {
+    this.dbname = null;
+  }
+
+  /** Returns true if field dbname is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbname() {
+    return this.dbname != null;
+  }
+
+  public void setDbnameIsSet(boolean value) {
+    if (!value) {
+      this.dbname = null;
+    }
+  }
+
+  public String getTablename() {
+    return this.tablename;
+  }
+
+  public void setTablename(String tablename) {
+    this.tablename = tablename;
+  }
+
+  public void unsetTablename() {
+    this.tablename = null;
+  }
+
+  /** Returns true if field tablename is set (has been assigned a value) and false otherwise */
+  public boolean isSetTablename() {
+    return this.tablename != null;
+  }
+
+  public void setTablenameIsSet(boolean value) {
+    if (!value) {
+      this.tablename = null;
+    }
+  }
+
+  public int getPartitionnamesSize() {
+    return (this.partitionnames == null) ? 0 : this.partitionnames.size();
+  }
+
+  public java.util.Iterator<String> getPartitionnamesIterator() {
+    return (this.partitionnames == null) ? null : this.partitionnames.iterator();
+  }
+
+  public void addToPartitionnames(String elem) {
+    if (this.partitionnames == null) {
+      this.partitionnames = new ArrayList<String>();
+    }
+    this.partitionnames.add(elem);
+  }
+
+  public List<String> getPartitionnames() {
+    return this.partitionnames;
+  }
+
+  public void setPartitionnames(List<String> partitionnames) {
+    this.partitionnames = partitionnames;
+  }
+
+  public void unsetPartitionnames() {
+    this.partitionnames = null;
+  }
+
+  /** Returns true if field partitionnames is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitionnames() {
+    return this.partitionnames != null;
+  }
+
+  public void setPartitionnamesIsSet(boolean value) {
+    if (!value) {
+      this.partitionnames = null;
+    }
+  }
+
+  /**
+   * 
+   * @see DataOperationType
+   */
+  public DataOperationType getOperationType() {
+    return this.operationType;
+  }
+
+  /**
+   * 
+   * @see DataOperationType
+   */
+  public void setOperationType(DataOperationType operationType) {
+    this.operationType = operationType;
+  }
+
+  public void unsetOperationType() {
+    this.operationType = null;
+  }
+
+  /** Returns true if field operationType is set (has been assigned a value) and false otherwise */
+  public boolean isSetOperationType() {
+    return this.operationType != null;
+  }
+
+  public void setOperationTypeIsSet(boolean value) {
+    if (!value) {
+      this.operationType = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TXNID:
+      if (value == null) {
+        unsetTxnid();
+      } else {
+        setTxnid((Long)value);
+      }
+      break;
+
+    case WRITEID:
+      if (value == null) {
+        unsetWriteid();
+      } else {
+        setWriteid((Long)value);
+      }
+      break;
+
+    case DBNAME:
+      if (value == null) {
+        unsetDbname();
+      } else {
+        setDbname((String)value);
+      }
+      break;
+
+    case TABLENAME:
+      if (value == null) {
+        unsetTablename();
+      } else {
+        setTablename((String)value);
+      }
+      break;
+
+    case PARTITIONNAMES:
+      if (value == null) {
+        unsetPartitionnames();
+      } else {
+        setPartitionnames((List<String>)value);
+      }
+      break;
+
+    case OPERATION_TYPE:
+      if (value == null) {
+        unsetOperationType();
+      } else {
+        setOperationType((DataOperationType)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TXNID:
+      return getTxnid();
+
+    case WRITEID:
+      return getWriteid();
+
+    case DBNAME:
+      return getDbname();
+
+    case TABLENAME:
+      return getTablename();
+
+    case PARTITIONNAMES:
+      return getPartitionnames();
+
+    case OPERATION_TYPE:
+      return getOperationType();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TXNID:
+      return isSetTxnid();
+    case WRITEID:
+      return isSetWriteid();
+    case DBNAME:
+      return isSetDbname();
+    case TABLENAME:
+      return isSetTablename();
+    case PARTITIONNAMES:
+      return isSetPartitionnames();
+    case OPERATION_TYPE:
+      return isSetOperationType();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AddDynamicPartitions)
+      return this.equals((AddDynamicPartitions)that);
+    return false;
+  }
+
+  public boolean equals(AddDynamicPartitions that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_txnid = true;
+    boolean that_present_txnid = true;
+    if (this_present_txnid || that_present_txnid) {
+      if (!(this_present_txnid && that_present_txnid))
+        return false;
+      if (this.txnid != that.txnid)
+        return false;
+    }
+
+    boolean this_present_writeid = true;
+    boolean that_present_writeid = true;
+    if (this_present_writeid || that_present_writeid) {
+      if (!(this_present_writeid && that_present_writeid))
+        return false;
+      if (this.writeid != that.writeid)
+        return false;
+    }
+
+    boolean this_present_dbname = true && this.isSetDbname();
+    boolean that_present_dbname = true && that.isSetDbname();
+    if (this_present_dbname || that_present_dbname) {
+      if (!(this_present_dbname && that_present_dbname))
+        return false;
+      if (!this.dbname.equals(that.dbname))
+        return false;
+    }
+
+    boolean this_present_tablename = true && this.isSetTablename();
+    boolean that_present_tablename = true && that.isSetTablename();
+    if (this_present_tablename || that_present_tablename) {
+      if (!(this_present_tablename && that_present_tablename))
+        return false;
+      if (!this.tablename.equals(that.tablename))
+        return false;
+    }
+
+    boolean this_present_partitionnames = true && this.isSetPartitionnames();
+    boolean that_present_partitionnames = true && that.isSetPartitionnames();
+    if (this_present_partitionnames || that_present_partitionnames) {
+      if (!(this_present_partitionnames && that_present_partitionnames))
+        return false;
+      if (!this.partitionnames.equals(that.partitionnames))
+        return false;
+    }
+
+    boolean this_present_operationType = true && this.isSetOperationType();
+    boolean that_present_operationType = true && that.isSetOperationType();
+    if (this_present_operationType || that_present_operationType) {
+      if (!(this_present_operationType && that_present_operationType))
+        return false;
+      if (!this.operationType.equals(that.operationType))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_txnid = true;
+    list.add(present_txnid);
+    if (present_txnid)
+      list.add(txnid);
+
+    boolean present_writeid = true;
+    list.add(present_writeid);
+    if (present_writeid)
+      list.add(writeid);
+
+    boolean present_dbname = true && (isSetDbname());
+    list.add(present_dbname);
+    if (present_dbname)
+      list.add(dbname);
+
+    boolean present_tablename = true && (isSetTablename());
+    list.add(present_tablename);
+    if (present_tablename)
+      list.add(tablename);
+
+    boolean present_partitionnames = true && (isSetPartitionnames());
+    list.add(present_partitionnames);
+    if (present_partitionnames)
+      list.add(partitionnames);
+
+    boolean present_operationType = true && (isSetOperationType());
+    list.add(present_operationType);
+    if (present_operationType)
+      list.add(operationType.getValue());
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AddDynamicPartitions other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTxnid()).compareTo(other.isSetTxnid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnid, other.txnid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetWriteid()).compareTo(other.isSetWriteid());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetWriteid()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeid, other.writeid);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbname()).compareTo(other.isSetDbname());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbname()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbname, other.dbname);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTablename()).compareTo(other.isSetTablename());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTablename()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tablename, other.tablename);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartitionnames()).compareTo(other.isSetPartitionnames());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitionnames()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionnames, other.partitionnames);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOperationType()).compareTo(other.isSetOperationType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOperationType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationType, other.operationType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AddDynamicPartitions(");
+    boolean first = true;
+
+    sb.append("txnid:");
+    sb.append(this.txnid);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("writeid:");
+    sb.append(this.writeid);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbname:");
+    if (this.dbname == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbname);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tablename:");
+    if (this.tablename == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tablename);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("partitionnames:");
+    if (this.partitionnames == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partitionnames);
+    }
+    first = false;
+    if (isSetOperationType()) {
+      if (!first) sb.append(", ");
+      sb.append("operationType:");
+      if (this.operationType == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.operationType);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTxnid()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnid' is unset! Struct:" + toString());
+    }
+
+    if (!isSetWriteid()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'writeid' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDbname()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbname' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTablename()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tablename' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPartitionnames()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partitionnames' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AddDynamicPartitionsStandardSchemeFactory implements SchemeFactory {
+    public AddDynamicPartitionsStandardScheme getScheme() {
+      return new AddDynamicPartitionsStandardScheme();
+    }
+  }
+
+  private static class AddDynamicPartitionsStandardScheme extends StandardScheme<AddDynamicPartitions> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartitions struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TXNID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txnid = iprot.readI64();
+              struct.setTxnidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // WRITEID
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.writeid = iprot.readI64();
+              struct.setWriteidIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // DBNAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbname = iprot.readString();
+              struct.setDbnameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // TABLENAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tablename = iprot.readString();
+              struct.setTablenameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // PARTITIONNAMES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list708 = iprot.readListBegin();
+                struct.partitionnames = new ArrayList<String>(_list708.size);
+                String _elem709;
+                for (int _i710 = 0; _i710 < _list708.size; ++_i710)
+                {
+                  _elem709 = iprot.readString();
+                  struct.partitionnames.add(_elem709);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionnamesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // OPERATION_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.findByValue(iprot.readI32());
+              struct.setOperationTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitions struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(TXNID_FIELD_DESC);
+      oprot.writeI64(struct.txnid);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(WRITEID_FIELD_DESC);
+      oprot.writeI64(struct.writeid);
+      oprot.writeFieldEnd();
+      if (struct.dbname != null) {
+        oprot.writeFieldBegin(DBNAME_FIELD_DESC);
+        oprot.writeString(struct.dbname);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tablename != null) {
+        oprot.writeFieldBegin(TABLENAME_FIELD_DESC);
+        oprot.writeString(struct.tablename);
+        oprot.writeFieldEnd();
+      }
+      if (struct.partitionnames != null) {
+        oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size()));
+          for (String _iter711 : struct.partitionnames)
+          {
+            oprot.writeString(_iter711);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.operationType != null) {
+        if (struct.isSetOperationType()) {
+          oprot.writeFieldBegin(OPERATION_TYPE_FIELD_DESC);
+          oprot.writeI32(struct.operationType.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AddDynamicPartitionsTupleSchemeFactory implements SchemeFactory {
+    public AddDynamicPartitionsTupleScheme getScheme() {
+      return new AddDynamicPartitionsTupleScheme();
+    }
+  }
+
+  private static class AddDynamicPartitionsTupleScheme extends TupleScheme<AddDynamicPartitions> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.txnid);
+      oprot.writeI64(struct.writeid);
+      oprot.writeString(struct.dbname);
+      oprot.writeString(struct.tablename);
+      {
+        oprot.writeI32(struct.partitionnames.size());
+        for (String _iter712 : struct.partitionnames)
+        {
+          oprot.writeString(_iter712);
+        }
+      }
+      BitSet optionals = new BitSet();
+      if (struct.isSetOperationType()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetOperationType()) {
+        oprot.writeI32(struct.operationType.getValue());
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.txnid = iprot.readI64();
+      struct.setTxnidIsSet(true);
+      struct.writeid = iprot.readI64();
+      struct.setWriteidIsSet(true);
+      struct.dbname = iprot.readString();
+      struct.setDbnameIsSet(true);
+      struct.tablename = iprot.readString();
+      struct.setTablenameIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list713 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.partitionnames = new ArrayList<String>(_list713.size);
+        String _elem714;
+        for (int _i715 = 0; _i715 < _list713.size; ++_i715)
+        {
+          _elem714 = iprot.readString();
+          struct.partitionnames.add(_elem714);
+        }
+      }
+      struct.setPartitionnamesIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.operationType = org.apache.hadoop.hive.metastore.api.DataOperationType.findByValue(iprot.readI32());
+        struct.setOperationTypeIsSet(true);
+      }
+    }
+  }
+
+}
+
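For orientation (not part of the patch): a minimal usage sketch of the AddDynamicPartitions class generated above. The txn/write ids and the database, table, and partition names are made-up values, and it assumes the generated metastore classes plus libthrift 0.9.3 are on the classpath:

import java.util.Arrays;
import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions;
import org.apache.hadoop.hive.metastore.api.DataOperationType;

public class AddDynamicPartitionsDemo {
  public static void main(String[] args) throws Exception {
    AddDynamicPartitions adp = new AddDynamicPartitions(
        100L,                       // txnid
        7L,                         // writeid
        "default",                  // dbname (illustrative)
        "sales",                    // tablename (illustrative)
        Arrays.asList("ds=2018-07-13/hr=01", "ds=2018-07-13/hr=02"));
    adp.setOperationType(DataOperationType.INSERT);  // optional field
    adp.validate();                 // throws if any required field is unset
    System.out.println(adp);        // generated toString()
  }
}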

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java
new file mode 100644
index 0000000..f57eb3b
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddForeignKeyRequest.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddForeignKeyRequest implements org.apache.thrift.TBase<AddForeignKeyRequest, AddForeignKeyRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AddForeignKeyRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddForeignKeyRequest");
+
+  private static final org.apache.thrift.protocol.TField FOREIGN_KEY_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignKeyCols", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AddForeignKeyRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AddForeignKeyRequestTupleSchemeFactory());
+  }
+
+  private List<SQLForeignKey> foreignKeyCols; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FOREIGN_KEY_COLS((short)1, "foreignKeyCols");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FOREIGN_KEY_COLS
+          return FOREIGN_KEY_COLS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FOREIGN_KEY_COLS, new org.apache.thrift.meta_data.FieldMetaData("foreignKeyCols", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLForeignKey.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddForeignKeyRequest.class, metaDataMap);
+  }
+
+  public AddForeignKeyRequest() {
+  }
+
+  public AddForeignKeyRequest(
+    List<SQLForeignKey> foreignKeyCols)
+  {
+    this();
+    this.foreignKeyCols = foreignKeyCols;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AddForeignKeyRequest(AddForeignKeyRequest other) {
+    if (other.isSetForeignKeyCols()) {
+      List<SQLForeignKey> __this__foreignKeyCols = new ArrayList<SQLForeignKey>(other.foreignKeyCols.size());
+      for (SQLForeignKey other_element : other.foreignKeyCols) {
+        __this__foreignKeyCols.add(new SQLForeignKey(other_element));
+      }
+      this.foreignKeyCols = __this__foreignKeyCols;
+    }
+  }
+
+  public AddForeignKeyRequest deepCopy() {
+    return new AddForeignKeyRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.foreignKeyCols = null;
+  }
+
+  public int getForeignKeyColsSize() {
+    return (this.foreignKeyCols == null) ? 0 : this.foreignKeyCols.size();
+  }
+
+  public java.util.Iterator<SQLForeignKey> getForeignKeyColsIterator() {
+    return (this.foreignKeyCols == null) ? null : this.foreignKeyCols.iterator();
+  }
+
+  public void addToForeignKeyCols(SQLForeignKey elem) {
+    if (this.foreignKeyCols == null) {
+      this.foreignKeyCols = new ArrayList<SQLForeignKey>();
+    }
+    this.foreignKeyCols.add(elem);
+  }
+
+  public List<SQLForeignKey> getForeignKeyCols() {
+    return this.foreignKeyCols;
+  }
+
+  public void setForeignKeyCols(List<SQLForeignKey> foreignKeyCols) {
+    this.foreignKeyCols = foreignKeyCols;
+  }
+
+  public void unsetForeignKeyCols() {
+    this.foreignKeyCols = null;
+  }
+
+  /** Returns true if field foreignKeyCols is set (has been assigned a value) and false otherwise */
+  public boolean isSetForeignKeyCols() {
+    return this.foreignKeyCols != null;
+  }
+
+  public void setForeignKeyColsIsSet(boolean value) {
+    if (!value) {
+      this.foreignKeyCols = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FOREIGN_KEY_COLS:
+      if (value == null) {
+        unsetForeignKeyCols();
+      } else {
+        setForeignKeyCols((List<SQLForeignKey>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FOREIGN_KEY_COLS:
+      return getForeignKeyCols();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FOREIGN_KEY_COLS:
+      return isSetForeignKeyCols();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AddForeignKeyRequest)
+      return this.equals((AddForeignKeyRequest)that);
+    return false;
+  }
+
+  public boolean equals(AddForeignKeyRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_foreignKeyCols = true && this.isSetForeignKeyCols();
+    boolean that_present_foreignKeyCols = true && that.isSetForeignKeyCols();
+    if (this_present_foreignKeyCols || that_present_foreignKeyCols) {
+      if (!(this_present_foreignKeyCols && that_present_foreignKeyCols))
+        return false;
+      if (!this.foreignKeyCols.equals(that.foreignKeyCols))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_foreignKeyCols = true && (isSetForeignKeyCols());
+    list.add(present_foreignKeyCols);
+    if (present_foreignKeyCols)
+      list.add(foreignKeyCols);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AddForeignKeyRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetForeignKeyCols()).compareTo(other.isSetForeignKeyCols());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetForeignKeyCols()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignKeyCols, other.foreignKeyCols);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AddForeignKeyRequest(");
+    boolean first = true;
+
+    sb.append("foreignKeyCols:");
+    if (this.foreignKeyCols == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.foreignKeyCols);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetForeignKeyCols()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'foreignKeyCols' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AddForeignKeyRequestStandardSchemeFactory implements SchemeFactory {
+    public AddForeignKeyRequestStandardScheme getScheme() {
+      return new AddForeignKeyRequestStandardScheme();
+    }
+  }
+
+  private static class AddForeignKeyRequestStandardScheme extends StandardScheme<AddForeignKeyRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AddForeignKeyRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FOREIGN_KEY_COLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list376 = iprot.readListBegin();
+                struct.foreignKeyCols = new ArrayList<SQLForeignKey>(_list376.size);
+                SQLForeignKey _elem377;
+                for (int _i378 = 0; _i378 < _list376.size; ++_i378)
+                {
+                  _elem377 = new SQLForeignKey();
+                  _elem377.read(iprot);
+                  struct.foreignKeyCols.add(_elem377);
+                }
+                iprot.readListEnd();
+              }
+              struct.setForeignKeyColsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AddForeignKeyRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.foreignKeyCols != null) {
+        oprot.writeFieldBegin(FOREIGN_KEY_COLS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeyCols.size()));
+          for (SQLForeignKey _iter379 : struct.foreignKeyCols)
+          {
+            _iter379.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AddForeignKeyRequestTupleSchemeFactory implements SchemeFactory {
+    public AddForeignKeyRequestTupleScheme getScheme() {
+      return new AddForeignKeyRequestTupleScheme();
+    }
+  }
+
+  private static class AddForeignKeyRequestTupleScheme extends TupleScheme<AddForeignKeyRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.foreignKeyCols.size());
+        for (SQLForeignKey _iter380 : struct.foreignKeyCols)
+        {
+          _iter380.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AddForeignKeyRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list381 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.foreignKeyCols = new ArrayList<SQLForeignKey>(_list381.size);
+        SQLForeignKey _elem382;
+        for (int _i383 = 0; _i383 < _list381.size; ++_i383)
+        {
+          _elem382 = new SQLForeignKey();
+          _elem382.read(iprot);
+          struct.foreignKeyCols.add(_elem382);
+        }
+      }
+      struct.setForeignKeyColsIsSet(true);
+    }
+  }
+
+}
+
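Again not part of the patch: a sketch of a serialization round trip through the StandardScheme, which is what the generated write()/read() methods dispatch to for a binary protocol. The empty SQLForeignKey element is a blank placeholder just to exercise the list field, and ForeignKeyRoundTrip is an illustrative name:

import java.util.ArrayList;
import org.apache.hadoop.hive.metastore.api.AddForeignKeyRequest;
import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TMemoryBuffer;

public class ForeignKeyRoundTrip {
  public static void main(String[] args) throws Exception {
    AddForeignKeyRequest req = new AddForeignKeyRequest(new ArrayList<SQLForeignKey>());
    req.addToForeignKeyCols(new SQLForeignKey());   // blank placeholder element

    TMemoryBuffer buf = new TMemoryBuffer(256);
    req.write(new TBinaryProtocol(buf));            // getScheme() -> StandardScheme
    AddForeignKeyRequest copy = new AddForeignKeyRequest();
    copy.read(new TBinaryProtocol(buf));

    System.out.println(req.equals(copy));           // expected: true
  }
}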

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java
new file mode 100644
index 0000000..e6bac16
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddNotNullConstraintRequest.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddNotNullConstraintRequest implements org.apache.thrift.TBase<AddNotNullConstraintRequest, AddNotNullConstraintRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AddNotNullConstraintRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddNotNullConstraintRequest");
+
+  private static final org.apache.thrift.protocol.TField NOT_NULL_CONSTRAINT_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("notNullConstraintCols", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AddNotNullConstraintRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AddNotNullConstraintRequestTupleSchemeFactory());
+  }
+
+  private List<SQLNotNullConstraint> notNullConstraintCols; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NOT_NULL_CONSTRAINT_COLS((short)1, "notNullConstraintCols");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NOT_NULL_CONSTRAINT_COLS
+          return NOT_NULL_CONSTRAINT_COLS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NOT_NULL_CONSTRAINT_COLS, new org.apache.thrift.meta_data.FieldMetaData("notNullConstraintCols", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLNotNullConstraint.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddNotNullConstraintRequest.class, metaDataMap);
+  }
+
+  public AddNotNullConstraintRequest() {
+  }
+
+  public AddNotNullConstraintRequest(
+    List<SQLNotNullConstraint> notNullConstraintCols)
+  {
+    this();
+    this.notNullConstraintCols = notNullConstraintCols;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AddNotNullConstraintRequest(AddNotNullConstraintRequest other) {
+    if (other.isSetNotNullConstraintCols()) {
+      List<SQLNotNullConstraint> __this__notNullConstraintCols = new ArrayList<SQLNotNullConstraint>(other.notNullConstraintCols.size());
+      for (SQLNotNullConstraint other_element : other.notNullConstraintCols) {
+        __this__notNullConstraintCols.add(new SQLNotNullConstraint(other_element));
+      }
+      this.notNullConstraintCols = __this__notNullConstraintCols;
+    }
+  }
+
+  public AddNotNullConstraintRequest deepCopy() {
+    return new AddNotNullConstraintRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.notNullConstraintCols = null;
+  }
+
+  public int getNotNullConstraintColsSize() {
+    return (this.notNullConstraintCols == null) ? 0 : this.notNullConstraintCols.size();
+  }
+
+  public java.util.Iterator<SQLNotNullConstraint> getNotNullConstraintColsIterator() {
+    return (this.notNullConstraintCols == null) ? null : this.notNullConstraintCols.iterator();
+  }
+
+  public void addToNotNullConstraintCols(SQLNotNullConstraint elem) {
+    if (this.notNullConstraintCols == null) {
+      this.notNullConstraintCols = new ArrayList<SQLNotNullConstraint>();
+    }
+    this.notNullConstraintCols.add(elem);
+  }
+
+  public List<SQLNotNullConstraint> getNotNullConstraintCols() {
+    return this.notNullConstraintCols;
+  }
+
+  public void setNotNullConstraintCols(List<SQLNotNullConstraint> notNullConstraintCols) {
+    this.notNullConstraintCols = notNullConstraintCols;
+  }
+
+  public void unsetNotNullConstraintCols() {
+    this.notNullConstraintCols = null;
+  }
+
+  /** Returns true if field notNullConstraintCols is set (has been assigned a value) and false otherwise */
+  public boolean isSetNotNullConstraintCols() {
+    return this.notNullConstraintCols != null;
+  }
+
+  public void setNotNullConstraintColsIsSet(boolean value) {
+    if (!value) {
+      this.notNullConstraintCols = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case NOT_NULL_CONSTRAINT_COLS:
+      if (value == null) {
+        unsetNotNullConstraintCols();
+      } else {
+        setNotNullConstraintCols((List<SQLNotNullConstraint>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case NOT_NULL_CONSTRAINT_COLS:
+      return getNotNullConstraintCols();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case NOT_NULL_CONSTRAINT_COLS:
+      return isSetNotNullConstraintCols();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AddNotNullConstraintRequest)
+      return this.equals((AddNotNullConstraintRequest)that);
+    return false;
+  }
+
+  public boolean equals(AddNotNullConstraintRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_notNullConstraintCols = true && this.isSetNotNullConstraintCols();
+    boolean that_present_notNullConstraintCols = true && that.isSetNotNullConstraintCols();
+    if (this_present_notNullConstraintCols || that_present_notNullConstraintCols) {
+      if (!(this_present_notNullConstraintCols && that_present_notNullConstraintCols))
+        return false;
+      if (!this.notNullConstraintCols.equals(that.notNullConstraintCols))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_notNullConstraintCols = true && (isSetNotNullConstraintCols());
+    list.add(present_notNullConstraintCols);
+    if (present_notNullConstraintCols)
+      list.add(notNullConstraintCols);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AddNotNullConstraintRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetNotNullConstraintCols()).compareTo(other.isSetNotNullConstraintCols());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNotNullConstraintCols()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.notNullConstraintCols, other.notNullConstraintCols);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AddNotNullConstraintRequest(");
+    boolean first = true;
+
+    sb.append("notNullConstraintCols:");
+    if (this.notNullConstraintCols == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.notNullConstraintCols);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetNotNullConstraintCols()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'notNullConstraintCols' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AddNotNullConstraintRequestStandardSchemeFactory implements SchemeFactory {
+    public AddNotNullConstraintRequestStandardScheme getScheme() {
+      return new AddNotNullConstraintRequestStandardScheme();
+    }
+  }
+
+  private static class AddNotNullConstraintRequestStandardScheme extends StandardScheme<AddNotNullConstraintRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AddNotNullConstraintRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // NOT_NULL_CONSTRAINT_COLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list392 = iprot.readListBegin();
+                struct.notNullConstraintCols = new ArrayList<SQLNotNullConstraint>(_list392.size);
+                SQLNotNullConstraint _elem393;
+                for (int _i394 = 0; _i394 < _list392.size; ++_i394)
+                {
+                  _elem393 = new SQLNotNullConstraint();
+                  _elem393.read(iprot);
+                  struct.notNullConstraintCols.add(_elem393);
+                }
+                iprot.readListEnd();
+              }
+              struct.setNotNullConstraintColsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AddNotNullConstraintRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.notNullConstraintCols != null) {
+        oprot.writeFieldBegin(NOT_NULL_CONSTRAINT_COLS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.notNullConstraintCols.size()));
+          for (SQLNotNullConstraint _iter395 : struct.notNullConstraintCols)
+          {
+            _iter395.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AddNotNullConstraintRequestTupleSchemeFactory implements SchemeFactory {
+    public AddNotNullConstraintRequestTupleScheme getScheme() {
+      return new AddNotNullConstraintRequestTupleScheme();
+    }
+  }
+
+  private static class AddNotNullConstraintRequestTupleScheme extends TupleScheme<AddNotNullConstraintRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.notNullConstraintCols.size());
+        for (SQLNotNullConstraint _iter396 : struct.notNullConstraintCols)
+        {
+          _iter396.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AddNotNullConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list397 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.notNullConstraintCols = new ArrayList<SQLNotNullConstraint>(_list397.size);
+        SQLNotNullConstraint _elem398;
+        for (int _i399 = 0; _i399 < _list397.size; ++_i399)
+        {
+          _elem398 = new SQLNotNullConstraint();
+          _elem398.read(iprot);
+          struct.notNullConstraintCols.add(_elem398);
+        }
+      }
+      struct.setNotNullConstraintColsIsSet(true);
+    }
+  }
+
+}
+
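AddNotNullConstraintRequest marks its only field REQUIRED, so validate(), invoked at the top of the StandardScheme write path above, rejects a request whose list was never assigned. A short sketch of that behavior, assuming libthrift 0.9.3 (TBinaryProtocol dispatches to the StandardScheme):

    import org.apache.hadoop.hive.metastore.api.AddNotNullConstraintRequest;
    import org.apache.hadoop.hive.metastore.api.SQLNotNullConstraint;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class NotNullConstraintDemo {
      public static void main(String[] args) {
        AddNotNullConstraintRequest req = new AddNotNullConstraintRequest();
        try {
          // StandardScheme.write calls validate() first; the required list
          // is unset, so this throws TProtocolException.
          req.write(new TBinaryProtocol(new TMemoryBuffer(256)));
        } catch (TException expected) {
          System.out.println(expected.getMessage());
        }
        req.addToNotNullConstraintCols(new SQLNotNullConstraint());
        // isSetNotNullConstraintCols() is now true, so validate() passes.
      }
    }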


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java
new file mode 100644
index 0000000..7d9ebba
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java
@@ -0,0 +1,710 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PutFileMetadataRequest implements org.apache.thrift.TBase<PutFileMetadataRequest, PutFileMetadataRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PutFileMetadataRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PutFileMetadataRequest");
+
+  private static final org.apache.thrift.protocol.TField FILE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("fileIds", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.LIST, (short)2);
+  private static final org.apache.thrift.protocol.TField TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("type", org.apache.thrift.protocol.TType.I32, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PutFileMetadataRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PutFileMetadataRequestTupleSchemeFactory());
+  }
+
+  private List<Long> fileIds; // required
+  private List<ByteBuffer> metadata; // required
+  private FileMetadataExprType type; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FILE_IDS((short)1, "fileIds"),
+    METADATA((short)2, "metadata"),
+    /**
+     * 
+     * @see FileMetadataExprType
+     */
+    TYPE((short)3, "type");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FILE_IDS
+          return FILE_IDS;
+        case 2: // METADATA
+          return METADATA;
+        case 3: // TYPE
+          return TYPE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.TYPE};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FILE_IDS, new org.apache.thrift.meta_data.FieldMetaData("fileIds", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
+    tmpMap.put(_Fields.TYPE, new org.apache.thrift.meta_data.FieldMetaData("type", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, FileMetadataExprType.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PutFileMetadataRequest.class, metaDataMap);
+  }
+
+  public PutFileMetadataRequest() {
+  }
+
+  public PutFileMetadataRequest(
+    List<Long> fileIds,
+    List<ByteBuffer> metadata)
+  {
+    this();
+    this.fileIds = fileIds;
+    this.metadata = metadata;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PutFileMetadataRequest(PutFileMetadataRequest other) {
+    if (other.isSetFileIds()) {
+      List<Long> __this__fileIds = new ArrayList<Long>(other.fileIds);
+      this.fileIds = __this__fileIds;
+    }
+    if (other.isSetMetadata()) {
+      List<ByteBuffer> __this__metadata = new ArrayList<ByteBuffer>(other.metadata);
+      this.metadata = __this__metadata;
+    }
+    if (other.isSetType()) {
+      this.type = other.type;
+    }
+  }
+
+  public PutFileMetadataRequest deepCopy() {
+    return new PutFileMetadataRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.fileIds = null;
+    this.metadata = null;
+    this.type = null;
+  }
+
+  public int getFileIdsSize() {
+    return (this.fileIds == null) ? 0 : this.fileIds.size();
+  }
+
+  public java.util.Iterator<Long> getFileIdsIterator() {
+    return (this.fileIds == null) ? null : this.fileIds.iterator();
+  }
+
+  public void addToFileIds(long elem) {
+    if (this.fileIds == null) {
+      this.fileIds = new ArrayList<Long>();
+    }
+    this.fileIds.add(elem);
+  }
+
+  public List<Long> getFileIds() {
+    return this.fileIds;
+  }
+
+  public void setFileIds(List<Long> fileIds) {
+    this.fileIds = fileIds;
+  }
+
+  public void unsetFileIds() {
+    this.fileIds = null;
+  }
+
+  /** Returns true if field fileIds is set (has been assigned a value) and false otherwise */
+  public boolean isSetFileIds() {
+    return this.fileIds != null;
+  }
+
+  public void setFileIdsIsSet(boolean value) {
+    if (!value) {
+      this.fileIds = null;
+    }
+  }
+
+  public int getMetadataSize() {
+    return (this.metadata == null) ? 0 : this.metadata.size();
+  }
+
+  public java.util.Iterator<ByteBuffer> getMetadataIterator() {
+    return (this.metadata == null) ? null : this.metadata.iterator();
+  }
+
+  public void addToMetadata(ByteBuffer elem) {
+    if (this.metadata == null) {
+      this.metadata = new ArrayList<ByteBuffer>();
+    }
+    this.metadata.add(elem);
+  }
+
+  public List<ByteBuffer> getMetadata() {
+    return this.metadata;
+  }
+
+  public void setMetadata(List<ByteBuffer> metadata) {
+    this.metadata = metadata;
+  }
+
+  public void unsetMetadata() {
+    this.metadata = null;
+  }
+
+  /** Returns true if field metadata is set (has been assigned a value) and false otherwise */
+  public boolean isSetMetadata() {
+    return this.metadata != null;
+  }
+
+  public void setMetadataIsSet(boolean value) {
+    if (!value) {
+      this.metadata = null;
+    }
+  }
+
+  /**
+   * 
+   * @see FileMetadataExprType
+   */
+  public FileMetadataExprType getType() {
+    return this.type;
+  }
+
+  /**
+   * 
+   * @see FileMetadataExprType
+   */
+  public void setType(FileMetadataExprType type) {
+    this.type = type;
+  }
+
+  public void unsetType() {
+    this.type = null;
+  }
+
+  /** Returns true if field type is set (has been assigned a value) and false otherwise */
+  public boolean isSetType() {
+    return this.type != null;
+  }
+
+  public void setTypeIsSet(boolean value) {
+    if (!value) {
+      this.type = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FILE_IDS:
+      if (value == null) {
+        unsetFileIds();
+      } else {
+        setFileIds((List<Long>)value);
+      }
+      break;
+
+    case METADATA:
+      if (value == null) {
+        unsetMetadata();
+      } else {
+        setMetadata((List<ByteBuffer>)value);
+      }
+      break;
+
+    case TYPE:
+      if (value == null) {
+        unsetType();
+      } else {
+        setType((FileMetadataExprType)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FILE_IDS:
+      return getFileIds();
+
+    case METADATA:
+      return getMetadata();
+
+    case TYPE:
+      return getType();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FILE_IDS:
+      return isSetFileIds();
+    case METADATA:
+      return isSetMetadata();
+    case TYPE:
+      return isSetType();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PutFileMetadataRequest)
+      return this.equals((PutFileMetadataRequest)that);
+    return false;
+  }
+
+  public boolean equals(PutFileMetadataRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_fileIds = true && this.isSetFileIds();
+    boolean that_present_fileIds = true && that.isSetFileIds();
+    if (this_present_fileIds || that_present_fileIds) {
+      if (!(this_present_fileIds && that_present_fileIds))
+        return false;
+      if (!this.fileIds.equals(that.fileIds))
+        return false;
+    }
+
+    boolean this_present_metadata = true && this.isSetMetadata();
+    boolean that_present_metadata = true && that.isSetMetadata();
+    if (this_present_metadata || that_present_metadata) {
+      if (!(this_present_metadata && that_present_metadata))
+        return false;
+      if (!this.metadata.equals(that.metadata))
+        return false;
+    }
+
+    boolean this_present_type = true && this.isSetType();
+    boolean that_present_type = true && that.isSetType();
+    if (this_present_type || that_present_type) {
+      if (!(this_present_type && that_present_type))
+        return false;
+      if (!this.type.equals(that.type))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_fileIds = true && (isSetFileIds());
+    list.add(present_fileIds);
+    if (present_fileIds)
+      list.add(fileIds);
+
+    boolean present_metadata = true && (isSetMetadata());
+    list.add(present_metadata);
+    if (present_metadata)
+      list.add(metadata);
+
+    boolean present_type = true && (isSetType());
+    list.add(present_type);
+    if (present_type)
+      list.add(type.getValue());
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PutFileMetadataRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetFileIds()).compareTo(other.isSetFileIds());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFileIds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fileIds, other.fileIds);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMetadata()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetType()).compareTo(other.isSetType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.type, other.type);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PutFileMetadataRequest(");
+    boolean first = true;
+
+    sb.append("fileIds:");
+    if (this.fileIds == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fileIds);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("metadata:");
+    if (this.metadata == null) {
+      sb.append("null");
+    } else {
+      org.apache.thrift.TBaseHelper.toString(this.metadata, sb);
+    }
+    first = false;
+    if (isSetType()) {
+      if (!first) sb.append(", ");
+      sb.append("type:");
+      if (this.type == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.type);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetFileIds()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'fileIds' is unset! Struct:" + toString());
+    }
+
+    if (!isSetMetadata()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'metadata' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PutFileMetadataRequestStandardSchemeFactory implements SchemeFactory {
+    public PutFileMetadataRequestStandardScheme getScheme() {
+      return new PutFileMetadataRequestStandardScheme();
+    }
+  }
+
+  private static class PutFileMetadataRequestStandardScheme extends StandardScheme<PutFileMetadataRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FILE_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list808 = iprot.readListBegin();
+                struct.fileIds = new ArrayList<Long>(_list808.size);
+                long _elem809;
+                for (int _i810 = 0; _i810 < _list808.size; ++_i810)
+                {
+                  _elem809 = iprot.readI64();
+                  struct.fileIds.add(_elem809);
+                }
+                iprot.readListEnd();
+              }
+              struct.setFileIdsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // METADATA
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list811 = iprot.readListBegin();
+                struct.metadata = new ArrayList<ByteBuffer>(_list811.size);
+                ByteBuffer _elem812;
+                for (int _i813 = 0; _i813 < _list811.size; ++_i813)
+                {
+                  _elem812 = iprot.readBinary();
+                  struct.metadata.add(_elem812);
+                }
+                iprot.readListEnd();
+              }
+              struct.setMetadataIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.type = org.apache.hadoop.hive.metastore.api.FileMetadataExprType.findByValue(iprot.readI32());
+              struct.setTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.fileIds != null) {
+        oprot.writeFieldBegin(FILE_IDS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size()));
+          for (long _iter814 : struct.fileIds)
+          {
+            oprot.writeI64(_iter814);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.metadata != null) {
+        oprot.writeFieldBegin(METADATA_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size()));
+          for (ByteBuffer _iter815 : struct.metadata)
+          {
+            oprot.writeBinary(_iter815);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.type != null) {
+        if (struct.isSetType()) {
+          oprot.writeFieldBegin(TYPE_FIELD_DESC);
+          oprot.writeI32(struct.type.getValue());
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PutFileMetadataRequestTupleSchemeFactory implements SchemeFactory {
+    public PutFileMetadataRequestTupleScheme getScheme() {
+      return new PutFileMetadataRequestTupleScheme();
+    }
+  }
+
+  private static class PutFileMetadataRequestTupleScheme extends TupleScheme<PutFileMetadataRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.fileIds.size());
+        for (long _iter816 : struct.fileIds)
+        {
+          oprot.writeI64(_iter816);
+        }
+      }
+      {
+        oprot.writeI32(struct.metadata.size());
+        for (ByteBuffer _iter817 : struct.metadata)
+        {
+          oprot.writeBinary(_iter817);
+        }
+      }
+      BitSet optionals = new BitSet();
+      if (struct.isSetType()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetType()) {
+        oprot.writeI32(struct.type.getValue());
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list818 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.fileIds = new ArrayList<Long>(_list818.size);
+        long _elem819;
+        for (int _i820 = 0; _i820 < _list818.size; ++_i820)
+        {
+          _elem819 = iprot.readI64();
+          struct.fileIds.add(_elem819);
+        }
+      }
+      struct.setFileIdsIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list821 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.metadata = new ArrayList<ByteBuffer>(_list821.size);
+        ByteBuffer _elem822;
+        for (int _i823 = 0; _i823 < _list821.size; ++_i823)
+        {
+          _elem822 = iprot.readBinary();
+          struct.metadata.add(_elem822);
+        }
+      }
+      struct.setMetadataIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.type = org.apache.hadoop.hive.metastore.api.FileMetadataExprType.findByValue(iprot.readI32());
+        struct.setTypeIsSet(true);
+      }
+    }
+  }
+
+}
+
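PutFileMetadataRequest pairs two required lists with an optional enum, and the schemes above encode that optionality differently: the StandardScheme emits field 3 only when isSetType() holds, while the TupleScheme signals it through a one-bit BitSet written after the required lists. A sketch (assuming libthrift 0.9.3 and the ORC_SARG value of FileMetadataExprType, which is defined elsewhere in this API):

    import java.nio.ByteBuffer;
    import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
    import org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class PutFileMetadataDemo {
      public static void main(String[] args) throws TException {
        PutFileMetadataRequest req = new PutFileMetadataRequest();
        req.addToFileIds(42L);                                 // field 1, required
        req.addToMetadata(ByteBuffer.wrap(new byte[] {1, 2})); // field 2, required

        // With 'type' unset the StandardScheme skips field 3 entirely.
        TMemoryBuffer without = new TMemoryBuffer(256);
        req.write(new TBinaryProtocol(without));

        // Setting the optional enum adds a field header plus an i32 value.
        req.setType(FileMetadataExprType.ORC_SARG);
        TMemoryBuffer with = new TMemoryBuffer(256);
        req.write(new TBinaryProtocol(with));

        System.out.println(without.length() + " < " + with.length());
      }
    }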

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataResult.java
new file mode 100644
index 0000000..76eec8c
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataResult.java
@@ -0,0 +1,283 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PutFileMetadataResult implements org.apache.thrift.TBase<PutFileMetadataResult, PutFileMetadataResult._Fields>, java.io.Serializable, Cloneable, Comparable<PutFileMetadataResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PutFileMetadataResult");
+
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PutFileMetadataResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PutFileMetadataResultTupleSchemeFactory());
+  }
+
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+;
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PutFileMetadataResult.class, metaDataMap);
+  }
+
+  public PutFileMetadataResult() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PutFileMetadataResult(PutFileMetadataResult other) {
+  }
+
+  public PutFileMetadataResult deepCopy() {
+    return new PutFileMetadataResult(this);
+  }
+
+  @Override
+  public void clear() {
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PutFileMetadataResult)
+      return this.equals((PutFileMetadataResult)that);
+    return false;
+  }
+
+  public boolean equals(PutFileMetadataResult that) {
+    if (that == null)
+      return false;
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PutFileMetadataResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PutFileMetadataResult(");
+    boolean first = true;
+
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PutFileMetadataResultStandardSchemeFactory implements SchemeFactory {
+    public PutFileMetadataResultStandardScheme getScheme() {
+      return new PutFileMetadataResultStandardScheme();
+    }
+  }
+
+  private static class PutFileMetadataResultStandardScheme extends StandardScheme<PutFileMetadataResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PutFileMetadataResultTupleSchemeFactory implements SchemeFactory {
+    public PutFileMetadataResultTupleScheme getScheme() {
+      return new PutFileMetadataResultTupleScheme();
+    }
+  }
+
+  private static class PutFileMetadataResultTupleScheme extends TupleScheme<PutFileMetadataResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+    }
+  }
+
+}
+
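PutFileMetadataResult is the degenerate case: its _Fields enum is empty (hence the bare semicolon above), it carries no state, and equals() only checks the argument's type. Under the StandardScheme its wire form reduces to a lone field-stop marker; a sketch, assuming libthrift 0.9.3's TBinaryProtocol (whose struct begin/end markers are zero-width):

    import org.apache.hadoop.hive.metastore.api.PutFileMetadataResult;
    import org.apache.thrift.TException;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class EmptyResultDemo {
      public static void main(String[] args) throws TException {
        TMemoryBuffer buf = new TMemoryBuffer(16);
        new PutFileMetadataResult().write(new TBinaryProtocol(buf));
        // writeStructBegin/End emit nothing in TBinaryProtocol, so the
        // only byte on the wire is the TType.STOP marker.
        System.out.println(buf.length()); // 1
      }
    }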

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java
new file mode 100644
index 0000000..0aeca14
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ReplTblWriteIdStateRequest.java
@@ -0,0 +1,952 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ReplTblWriteIdStateRequest implements org.apache.thrift.TBase<ReplTblWriteIdStateRequest, ReplTblWriteIdStateRequest._Fields>, java.io.Serializable, Cloneable, Comparable<ReplTblWriteIdStateRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ReplTblWriteIdStateRequest");
+
+  private static final org.apache.thrift.protocol.TField VALID_WRITE_IDLIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdlist", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField USER_FIELD_DESC = new org.apache.thrift.protocol.TField("user", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField HOST_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("hostName", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)5);
+  private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ReplTblWriteIdStateRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ReplTblWriteIdStateRequestTupleSchemeFactory());
+  }
+
+  private String validWriteIdlist; // required
+  private String user; // required
+  private String hostName; // required
+  private String dbName; // required
+  private String tableName; // required
+  private List<String> partNames; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    VALID_WRITE_IDLIST((short)1, "validWriteIdlist"),
+    USER((short)2, "user"),
+    HOST_NAME((short)3, "hostName"),
+    DB_NAME((short)4, "dbName"),
+    TABLE_NAME((short)5, "tableName"),
+    PART_NAMES((short)6, "partNames");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // VALID_WRITE_IDLIST
+          return VALID_WRITE_IDLIST;
+        case 2: // USER
+          return USER;
+        case 3: // HOST_NAME
+          return HOST_NAME;
+        case 4: // DB_NAME
+          return DB_NAME;
+        case 5: // TABLE_NAME
+          return TABLE_NAME;
+        case 6: // PART_NAMES
+          return PART_NAMES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.PART_NAMES};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.VALID_WRITE_IDLIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdlist", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.USER, new org.apache.thrift.meta_data.FieldMetaData("user", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.HOST_NAME, new org.apache.thrift.meta_data.FieldMetaData("hostName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ReplTblWriteIdStateRequest.class, metaDataMap);
+  }
+
+  public ReplTblWriteIdStateRequest() {
+  }
+
+  public ReplTblWriteIdStateRequest(
+    String validWriteIdlist,
+    String user,
+    String hostName,
+    String dbName,
+    String tableName)
+  {
+    this();
+    this.validWriteIdlist = validWriteIdlist;
+    this.user = user;
+    this.hostName = hostName;
+    this.dbName = dbName;
+    this.tableName = tableName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ReplTblWriteIdStateRequest(ReplTblWriteIdStateRequest other) {
+    if (other.isSetValidWriteIdlist()) {
+      this.validWriteIdlist = other.validWriteIdlist;
+    }
+    if (other.isSetUser()) {
+      this.user = other.user;
+    }
+    if (other.isSetHostName()) {
+      this.hostName = other.hostName;
+    }
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTableName()) {
+      this.tableName = other.tableName;
+    }
+    if (other.isSetPartNames()) {
+      List<String> __this__partNames = new ArrayList<String>(other.partNames);
+      this.partNames = __this__partNames;
+    }
+  }
+
+  public ReplTblWriteIdStateRequest deepCopy() {
+    return new ReplTblWriteIdStateRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.validWriteIdlist = null;
+    this.user = null;
+    this.hostName = null;
+    this.dbName = null;
+    this.tableName = null;
+    this.partNames = null;
+  }
+
+  public String getValidWriteIdlist() {
+    return this.validWriteIdlist;
+  }
+
+  public void setValidWriteIdlist(String validWriteIdlist) {
+    this.validWriteIdlist = validWriteIdlist;
+  }
+
+  public void unsetValidWriteIdlist() {
+    this.validWriteIdlist = null;
+  }
+
+  /** Returns true if field validWriteIdlist is set (has been assigned a value) and false otherwise */
+  public boolean isSetValidWriteIdlist() {
+    return this.validWriteIdlist != null;
+  }
+
+  public void setValidWriteIdlistIsSet(boolean value) {
+    if (!value) {
+      this.validWriteIdlist = null;
+    }
+  }
+
+  public String getUser() {
+    return this.user;
+  }
+
+  public void setUser(String user) {
+    this.user = user;
+  }
+
+  public void unsetUser() {
+    this.user = null;
+  }
+
+  /** Returns true if field user is set (has been assigned a value) and false otherwise */
+  public boolean isSetUser() {
+    return this.user != null;
+  }
+
+  public void setUserIsSet(boolean value) {
+    if (!value) {
+      this.user = null;
+    }
+  }
+
+  public String getHostName() {
+    return this.hostName;
+  }
+
+  public void setHostName(String hostName) {
+    this.hostName = hostName;
+  }
+
+  public void unsetHostName() {
+    this.hostName = null;
+  }
+
+  /** Returns true if field hostName is set (has been assigned a value) and false otherwise */
+  public boolean isSetHostName() {
+    return this.hostName != null;
+  }
+
+  public void setHostNameIsSet(boolean value) {
+    if (!value) {
+      this.hostName = null;
+    }
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  public int getPartNamesSize() {
+    return (this.partNames == null) ? 0 : this.partNames.size();
+  }
+
+  public java.util.Iterator<String> getPartNamesIterator() {
+    return (this.partNames == null) ? null : this.partNames.iterator();
+  }
+
+  public void addToPartNames(String elem) {
+    if (this.partNames == null) {
+      this.partNames = new ArrayList<String>();
+    }
+    this.partNames.add(elem);
+  }
+
+  public List<String> getPartNames() {
+    return this.partNames;
+  }
+
+  public void setPartNames(List<String> partNames) {
+    this.partNames = partNames;
+  }
+
+  public void unsetPartNames() {
+    this.partNames = null;
+  }
+
+  /** Returns true if field partNames is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartNames() {
+    return this.partNames != null;
+  }
+
+  public void setPartNamesIsSet(boolean value) {
+    if (!value) {
+      this.partNames = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case VALID_WRITE_IDLIST:
+      if (value == null) {
+        unsetValidWriteIdlist();
+      } else {
+        setValidWriteIdlist((String)value);
+      }
+      break;
+
+    case USER:
+      if (value == null) {
+        unsetUser();
+      } else {
+        setUser((String)value);
+      }
+      break;
+
+    case HOST_NAME:
+      if (value == null) {
+        unsetHostName();
+      } else {
+        setHostName((String)value);
+      }
+      break;
+
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case PART_NAMES:
+      if (value == null) {
+        unsetPartNames();
+      } else {
+        setPartNames((List<String>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case VALID_WRITE_IDLIST:
+      return getValidWriteIdlist();
+
+    case USER:
+      return getUser();
+
+    case HOST_NAME:
+      return getHostName();
+
+    case DB_NAME:
+      return getDbName();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case PART_NAMES:
+      return getPartNames();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if the field corresponding to fieldId is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case VALID_WRITE_IDLIST:
+      return isSetValidWriteIdlist();
+    case USER:
+      return isSetUser();
+    case HOST_NAME:
+      return isSetHostName();
+    case DB_NAME:
+      return isSetDbName();
+    case TABLE_NAME:
+      return isSetTableName();
+    case PART_NAMES:
+      return isSetPartNames();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ReplTblWriteIdStateRequest)
+      return this.equals((ReplTblWriteIdStateRequest)that);
+    return false;
+  }
+
+  public boolean equals(ReplTblWriteIdStateRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_validWriteIdlist = true && this.isSetValidWriteIdlist();
+    boolean that_present_validWriteIdlist = true && that.isSetValidWriteIdlist();
+    if (this_present_validWriteIdlist || that_present_validWriteIdlist) {
+      if (!(this_present_validWriteIdlist && that_present_validWriteIdlist))
+        return false;
+      if (!this.validWriteIdlist.equals(that.validWriteIdlist))
+        return false;
+    }
+
+    boolean this_present_user = true && this.isSetUser();
+    boolean that_present_user = true && that.isSetUser();
+    if (this_present_user || that_present_user) {
+      if (!(this_present_user && that_present_user))
+        return false;
+      if (!this.user.equals(that.user))
+        return false;
+    }
+
+    boolean this_present_hostName = true && this.isSetHostName();
+    boolean that_present_hostName = true && that.isSetHostName();
+    if (this_present_hostName || that_present_hostName) {
+      if (!(this_present_hostName && that_present_hostName))
+        return false;
+      if (!this.hostName.equals(that.hostName))
+        return false;
+    }
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_partNames = true && this.isSetPartNames();
+    boolean that_present_partNames = true && that.isSetPartNames();
+    if (this_present_partNames || that_present_partNames) {
+      if (!(this_present_partNames && that_present_partNames))
+        return false;
+      if (!this.partNames.equals(that.partNames))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_validWriteIdlist = true && (isSetValidWriteIdlist());
+    list.add(present_validWriteIdlist);
+    if (present_validWriteIdlist)
+      list.add(validWriteIdlist);
+
+    boolean present_user = true && (isSetUser());
+    list.add(present_user);
+    if (present_user)
+      list.add(user);
+
+    boolean present_hostName = true && (isSetHostName());
+    list.add(present_hostName);
+    if (present_hostName)
+      list.add(hostName);
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tableName = true && (isSetTableName());
+    list.add(present_tableName);
+    if (present_tableName)
+      list.add(tableName);
+
+    boolean present_partNames = true && (isSetPartNames());
+    list.add(present_partNames);
+    if (present_partNames)
+      list.add(partNames);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ReplTblWriteIdStateRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetValidWriteIdlist()).compareTo(other.isSetValidWriteIdlist());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValidWriteIdlist()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdlist, other.validWriteIdlist);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetUser()).compareTo(other.isSetUser());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetUser()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.user, other.user);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetHostName()).compareTo(other.isSetHostName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHostName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hostName, other.hostName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartNames()).compareTo(other.isSetPartNames());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartNames()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partNames, other.partNames);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ReplTblWriteIdStateRequest(");
+    boolean first = true;
+
+    sb.append("validWriteIdlist:");
+    if (this.validWriteIdlist == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.validWriteIdlist);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("user:");
+    if (this.user == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.user);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("hostName:");
+    if (this.hostName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.hostName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tableName:");
+    if (this.tableName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableName);
+    }
+    first = false;
+    if (isSetPartNames()) {
+      if (!first) sb.append(", ");
+      sb.append("partNames:");
+      if (this.partNames == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partNames);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetValidWriteIdlist()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'validWriteIdlist' is unset! Struct:" + toString());
+    }
+
+    if (!isSetUser()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'user' is unset! Struct:" + toString());
+    }
+
+    if (!isSetHostName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'hostName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTableName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ReplTblWriteIdStateRequestStandardSchemeFactory implements SchemeFactory {
+    public ReplTblWriteIdStateRequestStandardScheme getScheme() {
+      return new ReplTblWriteIdStateRequestStandardScheme();
+    }
+  }
+
+  private static class ReplTblWriteIdStateRequestStandardScheme extends StandardScheme<ReplTblWriteIdStateRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ReplTblWriteIdStateRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // VALID_WRITE_IDLIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.validWriteIdlist = iprot.readString();
+              struct.setValidWriteIdlistIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // USER
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.user = iprot.readString();
+              struct.setUserIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // HOST_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.hostName = iprot.readString();
+              struct.setHostNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = iprot.readString();
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // PART_NAMES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list602 = iprot.readListBegin();
+                struct.partNames = new ArrayList<String>(_list602.size);
+                String _elem603;
+                for (int _i604 = 0; _i604 < _list602.size; ++_i604)
+                {
+                  _elem603 = iprot.readString();
+                  struct.partNames.add(_elem603);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartNamesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ReplTblWriteIdStateRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.validWriteIdlist != null) {
+        oprot.writeFieldBegin(VALID_WRITE_IDLIST_FIELD_DESC);
+        oprot.writeString(struct.validWriteIdlist);
+        oprot.writeFieldEnd();
+      }
+      if (struct.user != null) {
+        oprot.writeFieldBegin(USER_FIELD_DESC);
+        oprot.writeString(struct.user);
+        oprot.writeFieldEnd();
+      }
+      if (struct.hostName != null) {
+        oprot.writeFieldBegin(HOST_NAME_FIELD_DESC);
+        oprot.writeString(struct.hostName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tableName != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.tableName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.partNames != null) {
+        if (struct.isSetPartNames()) {
+          oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
+            for (String _iter605 : struct.partNames)
+            {
+              oprot.writeString(_iter605);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ReplTblWriteIdStateRequestTupleSchemeFactory implements SchemeFactory {
+    public ReplTblWriteIdStateRequestTupleScheme getScheme() {
+      return new ReplTblWriteIdStateRequestTupleScheme();
+    }
+  }
+
+  private static class ReplTblWriteIdStateRequestTupleScheme extends TupleScheme<ReplTblWriteIdStateRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdStateRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.validWriteIdlist);
+      oprot.writeString(struct.user);
+      oprot.writeString(struct.hostName);
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tableName);
+      BitSet optionals = new BitSet();
+      if (struct.isSetPartNames()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetPartNames()) {
+        {
+          oprot.writeI32(struct.partNames.size());
+          for (String _iter606 : struct.partNames)
+          {
+            oprot.writeString(_iter606);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ReplTblWriteIdStateRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.validWriteIdlist = iprot.readString();
+      struct.setValidWriteIdlistIsSet(true);
+      struct.user = iprot.readString();
+      struct.setUserIsSet(true);
+      struct.hostName = iprot.readString();
+      struct.setHostNameIsSet(true);
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tableName = iprot.readString();
+      struct.setTableNameIsSet(true);
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list607 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.partNames = new ArrayList<String>(_list607.size);
+          String _elem608;
+          for (int _i609 = 0; _i609 < _list607.size; ++_i609)
+          {
+            _elem608 = iprot.readString();
+            struct.partNames.add(_elem608);
+          }
+        }
+        struct.setPartNamesIsSet(true);
+      }
+    }
+  }
+
+}
+

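The generated bean above follows the standard Thrift javabean contract: the five required String fields are enforced by validate(), while the optional partNames list is tracked through isSetPartNames() and serialized only when present. A minimal usage sketch, assuming only libthrift 0.9.3 on the classpath; the class name, the sample write-id list string and the partition value are illustrative, not part of this patch:

    import org.apache.hadoop.hive.metastore.api.ReplTblWriteIdStateRequest;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TBinaryProtocol;

    public class ReplTblWriteIdStateRequestSketch {
      public static void main(String[] args) throws Exception {
        // The generated convenience constructor takes the five required fields;
        // validate() throws TProtocolException if any of them is left unset.
        ReplTblWriteIdStateRequest req = new ReplTblWriteIdStateRequest(
            "db.tbl:5:9223372036854775807::",  // validWriteIdlist (illustrative value)
            "hive", "worker-1.example.com", "db", "tbl");
        req.addToPartNames("ds=2018-07-12");   // optional list, lazily initialized
        req.validate();

        // Round-trip through the binary protocol using the stock Thrift helpers.
        byte[] bytes = new TSerializer(new TBinaryProtocol.Factory()).serialize(req);
        ReplTblWriteIdStateRequest copy = new ReplTblWriteIdStateRequest();
        new TDeserializer(new TBinaryProtocol.Factory()).deserialize(copy, bytes);
        assert req.equals(copy) && req.compareTo(copy) == 0;
      }
    }
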
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java
new file mode 100644
index 0000000..bad44ad
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java
@@ -0,0 +1,438 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class RequestPartsSpec extends org.apache.thrift.TUnion<RequestPartsSpec, RequestPartsSpec._Fields> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RequestPartsSpec");
+  private static final org.apache.thrift.protocol.TField NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("names", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField EXPRS_FIELD_DESC = new org.apache.thrift.protocol.TField("exprs", org.apache.thrift.protocol.TType.LIST, (short)2);
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    NAMES((short)1, "names"),
+    EXPRS((short)2, "exprs");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // NAMES
+          return NAMES;
+        case 2: // EXPRS
+          return EXPRS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.NAMES, new org.apache.thrift.meta_data.FieldMetaData("names", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.EXPRS, new org.apache.thrift.meta_data.FieldMetaData("exprs", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, DropPartitionsExpr.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RequestPartsSpec.class, metaDataMap);
+  }
+
+  public RequestPartsSpec() {
+    super();
+  }
+
+  public RequestPartsSpec(_Fields setField, Object value) {
+    super(setField, value);
+  }
+
+  public RequestPartsSpec(RequestPartsSpec other) {
+    super(other);
+  }
+  public RequestPartsSpec deepCopy() {
+    return new RequestPartsSpec(this);
+  }
+
+  public static RequestPartsSpec names(List<String> value) {
+    RequestPartsSpec x = new RequestPartsSpec();
+    x.setNames(value);
+    return x;
+  }
+
+  public static RequestPartsSpec exprs(List<DropPartitionsExpr> value) {
+    RequestPartsSpec x = new RequestPartsSpec();
+    x.setExprs(value);
+    return x;
+  }
+
+
+  @Override
+  protected void checkType(_Fields setField, Object value) throws ClassCastException {
+    switch (setField) {
+      case NAMES:
+        if (value instanceof List) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type List<String> for field 'names', but got " + value.getClass().getSimpleName());
+      case EXPRS:
+        if (value instanceof List) {
+          break;
+        }
+        throw new ClassCastException("Was expecting value of type List<DropPartitionsExpr> for field 'exprs', but got " + value.getClass().getSimpleName());
+      default:
+        throw new IllegalArgumentException("Unknown field id " + setField);
+    }
+  }
+
+  @Override
+  protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TField field) throws org.apache.thrift.TException {
+    _Fields setField = _Fields.findByThriftId(field.id);
+    if (setField != null) {
+      switch (setField) {
+        case NAMES:
+          if (field.type == NAMES_FIELD_DESC.type) {
+            List<String> names;
+            {
+              org.apache.thrift.protocol.TList _list498 = iprot.readListBegin();
+              names = new ArrayList<String>(_list498.size);
+              String _elem499;
+              for (int _i500 = 0; _i500 < _list498.size; ++_i500)
+              {
+                _elem499 = iprot.readString();
+                names.add(_elem499);
+              }
+              iprot.readListEnd();
+            }
+            return names;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        case EXPRS:
+          if (field.type == EXPRS_FIELD_DESC.type) {
+            List<DropPartitionsExpr> exprs;
+            {
+              org.apache.thrift.protocol.TList _list501 = iprot.readListBegin();
+              exprs = new ArrayList<DropPartitionsExpr>(_list501.size);
+              DropPartitionsExpr _elem502;
+              for (int _i503 = 0; _i503 < _list501.size; ++_i503)
+              {
+                _elem502 = new DropPartitionsExpr();
+                _elem502.read(iprot);
+                exprs.add(_elem502);
+              }
+              iprot.readListEnd();
+            }
+            return exprs;
+          } else {
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+            return null;
+          }
+        default:
+          throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
+      }
+    } else {
+      org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+      return null;
+    }
+  }
+
+  @Override
+  protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    switch (setField_) {
+      case NAMES:
+        List<String> names = (List<String>)value_;
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size()));
+          for (String _iter504 : names)
+          {
+            oprot.writeString(_iter504);
+          }
+          oprot.writeListEnd();
+        }
+        return;
+      case EXPRS:
+        List<DropPartitionsExpr> exprs = (List<DropPartitionsExpr>)value_;
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size()));
+          for (DropPartitionsExpr _iter505 : exprs)
+          {
+            _iter505.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        return;
+      default:
+        throw new IllegalStateException("Cannot write union with unknown field " + setField_);
+    }
+  }
+
+  @Override
+  protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot, short fieldID) throws org.apache.thrift.TException {
+    _Fields setField = _Fields.findByThriftId(fieldID);
+    if (setField != null) {
+      switch (setField) {
+        case NAMES:
+          List<String> names;
+          {
+            org.apache.thrift.protocol.TList _list506 = iprot.readListBegin();
+            names = new ArrayList<String>(_list506.size);
+            String _elem507;
+            for (int _i508 = 0; _i508 < _list506.size; ++_i508)
+            {
+              _elem507 = iprot.readString();
+              names.add(_elem507);
+            }
+            iprot.readListEnd();
+          }
+          return names;
+        case EXPRS:
+          List<DropPartitionsExpr> exprs;
+          {
+            org.apache.thrift.protocol.TList _list509 = iprot.readListBegin();
+            exprs = new ArrayList<DropPartitionsExpr>(_list509.size);
+            DropPartitionsExpr _elem510;
+            for (int _i511 = 0; _i511 < _list509.size; ++_i511)
+            {
+              _elem510 = new DropPartitionsExpr();
+              _elem510.read(iprot);
+              exprs.add(_elem510);
+            }
+            iprot.readListEnd();
+          }
+          return exprs;
+        default:
+          throw new IllegalStateException("setField wasn't null, but didn't match any of the case statements!");
+      }
+    } else {
+      throw new TProtocolException("Couldn't find a field with field id " + fieldID);
+    }
+  }
+
+  @Override
+  protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    switch (setField_) {
+      case NAMES:
+        List<String> names = (List<String>)value_;
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size()));
+          for (String _iter512 : names)
+          {
+            oprot.writeString(_iter512);
+          }
+          oprot.writeListEnd();
+        }
+        return;
+      case EXPRS:
+        List<DropPartitionsExpr> exprs = (List<DropPartitionsExpr>)value_;
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size()));
+          for (DropPartitionsExpr _iter513 : exprs)
+          {
+            _iter513.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        return;
+      default:
+        throw new IllegalStateException("Cannot write union with unknown field " + setField_);
+    }
+  }
+
+  @Override
+  protected org.apache.thrift.protocol.TField getFieldDesc(_Fields setField) {
+    switch (setField) {
+      case NAMES:
+        return NAMES_FIELD_DESC;
+      case EXPRS:
+        return EXPRS_FIELD_DESC;
+      default:
+        throw new IllegalArgumentException("Unknown field id " + setField);
+    }
+  }
+
+  @Override
+  protected org.apache.thrift.protocol.TStruct getStructDesc() {
+    return STRUCT_DESC;
+  }
+
+  @Override
+  protected _Fields enumForId(short id) {
+    return _Fields.findByThriftIdOrThrow(id);
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+
+  public List<String> getNames() {
+    if (getSetField() == _Fields.NAMES) {
+      return (List<String>)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'names' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setNames(List<String> value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.NAMES;
+    value_ = value;
+  }
+
+  public List<DropPartitionsExpr> getExprs() {
+    if (getSetField() == _Fields.EXPRS) {
+      return (List<DropPartitionsExpr>)getFieldValue();
+    } else {
+      throw new RuntimeException("Cannot get field 'exprs' because union is currently set to " + getFieldDesc(getSetField()).name);
+    }
+  }
+
+  public void setExprs(List<DropPartitionsExpr> value) {
+    if (value == null) throw new NullPointerException();
+    setField_ = _Fields.EXPRS;
+    value_ = value;
+  }
+
+  public boolean isSetNames() {
+    return setField_ == _Fields.NAMES;
+  }
+
+
+  public boolean isSetExprs() {
+    return setField_ == _Fields.EXPRS;
+  }
+
+
+  public boolean equals(Object other) {
+    if (other instanceof RequestPartsSpec) {
+      return equals((RequestPartsSpec)other);
+    } else {
+      return false;
+    }
+  }
+
+  public boolean equals(RequestPartsSpec other) {
+    return other != null && getSetField() == other.getSetField() && getFieldValue().equals(other.getFieldValue());
+  }
+
+  @Override
+  public int compareTo(RequestPartsSpec other) {
+    int lastComparison = org.apache.thrift.TBaseHelper.compareTo(getSetField(), other.getSetField());
+    if (lastComparison == 0) {
+      return org.apache.thrift.TBaseHelper.compareTo(getFieldValue(), other.getFieldValue());
+    }
+    return lastComparison;
+  }
+
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+    list.add(this.getClass().getName());
+    org.apache.thrift.TFieldIdEnum setField = getSetField();
+    if (setField != null) {
+      list.add(setField.getThriftFieldId());
+      Object value = getFieldValue();
+      if (value instanceof org.apache.thrift.TEnum) {
+        list.add(((org.apache.thrift.TEnum)getFieldValue()).getValue());
+      } else {
+        list.add(value);
+      }
+    }
+    return list.hashCode();
+  }
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+
+}

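RequestPartsSpec above is a TUnion rather than a plain struct: exactly one of names/exprs is active at a time, the static factories select the arm, and reading the unset arm throws at runtime. A short sketch under the same assumptions (the partition names are illustrative only):

    import java.util.Arrays;
    import org.apache.hadoop.hive.metastore.api.RequestPartsSpec;

    public class RequestPartsSpecSketch {
      public static void main(String[] args) {
        // The static factory sets the union's single active arm.
        RequestPartsSpec spec = RequestPartsSpec.names(
            Arrays.asList("ds=2018-07-12/hr=00", "ds=2018-07-12/hr=01"));

        if (spec.isSetNames()) {
          System.out.println("dropping " + spec.getNames().size() + " partitions by name");
        }

        // Reading the other arm throws, since only one field of a TUnion is ever set:
        // spec.getExprs();  // RuntimeException: "... union is currently set to names"
      }
    }
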
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceType.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceType.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceType.java
new file mode 100644
index 0000000..a15c1ee
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceType.java
@@ -0,0 +1,48 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+
+import java.util.Map;
+import java.util.HashMap;
+import org.apache.thrift.TEnum;
+
+public enum ResourceType implements org.apache.thrift.TEnum {
+  JAR(1),
+  FILE(2),
+  ARCHIVE(3);
+
+  private final int value;
+
+  private ResourceType(int value) {
+    this.value = value;
+  }
+
+  /**
+   * Get the integer value of this enum value, as defined in the Thrift IDL.
+   */
+  public int getValue() {
+    return value;
+  }
+
+  /**
+   * Find the enum type by its integer value, as defined in the Thrift IDL.
+   * @return null if the value is not found.
+   */
+  public static ResourceType findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return JAR;
+      case 2:
+        return FILE;
+      case 3:
+        return ARCHIVE;
+      default:
+        return null;
+    }
+  }
+}

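ResourceType round-trips between the IDL's integer codes and Java constants; note that findByValue returns null for an unknown code instead of throwing, so wire-level decoders should null-check the result. An illustrative sketch (the unknown code 99 is hypothetical):

    import org.apache.hadoop.hive.metastore.api.ResourceType;

    public class ResourceTypeSketch {
      public static void main(String[] args) {
        int wireValue = ResourceType.ARCHIVE.getValue();  // 3, as declared in the IDL
        assert ResourceType.findByValue(wireValue) == ResourceType.ARCHIVE;

        // Unknown wire values (e.g. from a newer schema) decode to null, not an
        // exception, so callers must handle the null case explicitly.
        assert ResourceType.findByValue(99) == null;
      }
    }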

[75/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
index 0000000,2bd958e..fdcd3de
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-rb/thrift_hive_metastore.rb
@@@ -1,0 -1,13779 +1,13845 @@@
+ #
+ # Autogenerated by Thrift Compiler (0.9.3)
+ #
+ # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ #
+ 
+ require 'thrift'
+ require 'facebook_service'
+ require 'hive_metastore_types'
+ 
+ module ThriftHiveMetastore
+   class Client < ::FacebookService::Client 
+     include ::Thrift::Client
+ 
+     def getMetaConf(key)
+       send_getMetaConf(key)
+       return recv_getMetaConf()
+     end
+ 
+     def send_getMetaConf(key)
+       send_message('getMetaConf', GetMetaConf_args, :key => key)
+     end
+ 
+     def recv_getMetaConf()
+       result = receive_message(GetMetaConf_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'getMetaConf failed: unknown result')
+     end
+ 
+     def setMetaConf(key, value)
+       send_setMetaConf(key, value)
+       recv_setMetaConf()
+     end
+ 
+     def send_setMetaConf(key, value)
+       send_message('setMetaConf', SetMetaConf_args, :key => key, :value => value)
+     end
+ 
+     def recv_setMetaConf()
+       result = receive_message(SetMetaConf_result)
+       raise result.o1 unless result.o1.nil?
+       return
+     end
+ 
+     def create_catalog(catalog)
+       send_create_catalog(catalog)
+       recv_create_catalog()
+     end
+ 
+     def send_create_catalog(catalog)
+       send_message('create_catalog', Create_catalog_args, :catalog => catalog)
+     end
+ 
+     def recv_create_catalog()
+       result = receive_message(Create_catalog_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def alter_catalog(rqst)
+       send_alter_catalog(rqst)
+       recv_alter_catalog()
+     end
+ 
+     def send_alter_catalog(rqst)
+       send_message('alter_catalog', Alter_catalog_args, :rqst => rqst)
+     end
+ 
+     def recv_alter_catalog()
+       result = receive_message(Alter_catalog_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def get_catalog(catName)
+       send_get_catalog(catName)
+       return recv_get_catalog()
+     end
+ 
+     def send_get_catalog(catName)
+       send_message('get_catalog', Get_catalog_args, :catName => catName)
+     end
+ 
+     def recv_get_catalog()
+       result = receive_message(Get_catalog_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_catalog failed: unknown result')
+     end
+ 
+     def get_catalogs()
+       send_get_catalogs()
+       return recv_get_catalogs()
+     end
+ 
+     def send_get_catalogs()
+       send_message('get_catalogs', Get_catalogs_args)
+     end
+ 
+     def recv_get_catalogs()
+       result = receive_message(Get_catalogs_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_catalogs failed: unknown result')
+     end
+ 
+     def drop_catalog(catName)
+       send_drop_catalog(catName)
+       recv_drop_catalog()
+     end
+ 
+     def send_drop_catalog(catName)
+       send_message('drop_catalog', Drop_catalog_args, :catName => catName)
+     end
+ 
+     def recv_drop_catalog()
+       result = receive_message(Drop_catalog_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def create_database(database)
+       send_create_database(database)
+       recv_create_database()
+     end
+ 
+     def send_create_database(database)
+       send_message('create_database', Create_database_args, :database => database)
+     end
+ 
+     def recv_create_database()
+       result = receive_message(Create_database_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def get_database(name)
+       send_get_database(name)
+       return recv_get_database()
+     end
+ 
+     def send_get_database(name)
+       send_message('get_database', Get_database_args, :name => name)
+     end
+ 
+     def recv_get_database()
+       result = receive_message(Get_database_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_database failed: unknown result')
+     end
+ 
+     def drop_database(name, deleteData, cascade)
+       send_drop_database(name, deleteData, cascade)
+       recv_drop_database()
+     end
+ 
+     def send_drop_database(name, deleteData, cascade)
+       send_message('drop_database', Drop_database_args, :name => name, :deleteData => deleteData, :cascade => cascade)
+     end
+ 
+     def recv_drop_database()
+       result = receive_message(Drop_database_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def get_databases(pattern)
+       send_get_databases(pattern)
+       return recv_get_databases()
+     end
+ 
+     def send_get_databases(pattern)
+       send_message('get_databases', Get_databases_args, :pattern => pattern)
+     end
+ 
+     def recv_get_databases()
+       result = receive_message(Get_databases_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_databases failed: unknown result')
+     end
+ 
+     def get_all_databases()
+       send_get_all_databases()
+       return recv_get_all_databases()
+     end
+ 
+     def send_get_all_databases()
+       send_message('get_all_databases', Get_all_databases_args)
+     end
+ 
+     def recv_get_all_databases()
+       result = receive_message(Get_all_databases_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_all_databases failed: unknown result')
+     end
+ 
+     def alter_database(dbname, db)
+       send_alter_database(dbname, db)
+       recv_alter_database()
+     end
+ 
+     def send_alter_database(dbname, db)
+       send_message('alter_database', Alter_database_args, :dbname => dbname, :db => db)
+     end
+ 
+     def recv_alter_database()
+       result = receive_message(Alter_database_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def get_type(name)
+       send_get_type(name)
+       return recv_get_type()
+     end
+ 
+     def send_get_type(name)
+       send_message('get_type', Get_type_args, :name => name)
+     end
+ 
+     def recv_get_type()
+       result = receive_message(Get_type_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_type failed: unknown result')
+     end
+ 
+     def create_type(type)
+       send_create_type(type)
+       return recv_create_type()
+     end
+ 
+     def send_create_type(type)
+       send_message('create_type', Create_type_args, :type => type)
+     end
+ 
+     def recv_create_type()
+       result = receive_message(Create_type_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'create_type failed: unknown result')
+     end
+ 
+     def drop_type(type)
+       send_drop_type(type)
+       return recv_drop_type()
+     end
+ 
+     def send_drop_type(type)
+       send_message('drop_type', Drop_type_args, :type => type)
+     end
+ 
+     def recv_drop_type()
+       result = receive_message(Drop_type_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_type failed: unknown result')
+     end
+ 
+     def get_type_all(name)
+       send_get_type_all(name)
+       return recv_get_type_all()
+     end
+ 
+     def send_get_type_all(name)
+       send_message('get_type_all', Get_type_all_args, :name => name)
+     end
+ 
+     def recv_get_type_all()
+       result = receive_message(Get_type_all_result)
+       return result.success unless result.success.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_type_all failed: unknown result')
+     end
+ 
+     def get_fields(db_name, table_name)
+       send_get_fields(db_name, table_name)
+       return recv_get_fields()
+     end
+ 
+     def send_get_fields(db_name, table_name)
+       send_message('get_fields', Get_fields_args, :db_name => db_name, :table_name => table_name)
+     end
+ 
+     def recv_get_fields()
+       result = receive_message(Get_fields_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_fields failed: unknown result')
+     end
+ 
+     def get_fields_with_environment_context(db_name, table_name, environment_context)
+       send_get_fields_with_environment_context(db_name, table_name, environment_context)
+       return recv_get_fields_with_environment_context()
+     end
+ 
+     def send_get_fields_with_environment_context(db_name, table_name, environment_context)
+       send_message('get_fields_with_environment_context', Get_fields_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context)
+     end
+ 
+     def recv_get_fields_with_environment_context()
+       result = receive_message(Get_fields_with_environment_context_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_fields_with_environment_context failed: unknown result')
+     end
+ 
+     def get_schema(db_name, table_name)
+       send_get_schema(db_name, table_name)
+       return recv_get_schema()
+     end
+ 
+     def send_get_schema(db_name, table_name)
+       send_message('get_schema', Get_schema_args, :db_name => db_name, :table_name => table_name)
+     end
+ 
+     def recv_get_schema()
+       result = receive_message(Get_schema_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_schema failed: unknown result')
+     end
+ 
+     def get_schema_with_environment_context(db_name, table_name, environment_context)
+       send_get_schema_with_environment_context(db_name, table_name, environment_context)
+       return recv_get_schema_with_environment_context()
+     end
+ 
+     def send_get_schema_with_environment_context(db_name, table_name, environment_context)
+       send_message('get_schema_with_environment_context', Get_schema_with_environment_context_args, :db_name => db_name, :table_name => table_name, :environment_context => environment_context)
+     end
+ 
+     def recv_get_schema_with_environment_context()
+       result = receive_message(Get_schema_with_environment_context_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_schema_with_environment_context failed: unknown result')
+     end
+ 
+     def create_table(tbl)
+       send_create_table(tbl)
+       recv_create_table()
+     end
+ 
+     def send_create_table(tbl)
+       send_message('create_table', Create_table_args, :tbl => tbl)
+     end
+ 
+     def recv_create_table()
+       result = receive_message(Create_table_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       return
+     end
+ 
+     def create_table_with_environment_context(tbl, environment_context)
+       send_create_table_with_environment_context(tbl, environment_context)
+       recv_create_table_with_environment_context()
+     end
+ 
+     def send_create_table_with_environment_context(tbl, environment_context)
+       send_message('create_table_with_environment_context', Create_table_with_environment_context_args, :tbl => tbl, :environment_context => environment_context)
+     end
+ 
+     def recv_create_table_with_environment_context()
+       result = receive_message(Create_table_with_environment_context_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       return
+     end
+ 
+     def create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints)
+       send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints)
+       recv_create_table_with_constraints()
+     end
+ 
+     def send_create_table_with_constraints(tbl, primaryKeys, foreignKeys, uniqueConstraints, notNullConstraints, defaultConstraints, checkConstraints)
+       send_message('create_table_with_constraints', Create_table_with_constraints_args, :tbl => tbl, :primaryKeys => primaryKeys, :foreignKeys => foreignKeys, :uniqueConstraints => uniqueConstraints, :notNullConstraints => notNullConstraints, :defaultConstraints => defaultConstraints, :checkConstraints => checkConstraints)
+     end
+ 
+     def recv_create_table_with_constraints()
+       result = receive_message(Create_table_with_constraints_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       return
+     end
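+ 
+     # The six constraint arguments are parallel lists of the generated
+     # SQL*Constraint structs; pass an empty list for any constraint kind
+     # the table does not declare. A hedged sketch (database, table, and
+     # column names are illustrative):
+     #
+     #   nn = SQLNotNullConstraint.new(:catName => 'hive', :table_db => 'default',
+     #                                 :table_name => 'web_logs', :column_name => 'id')
+     #   client.create_table_with_constraints(tbl, [], [], [], [nn], [], [])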
+ 
+     def drop_constraint(req)
+       send_drop_constraint(req)
+       recv_drop_constraint()
+     end
+ 
+     def send_drop_constraint(req)
+       send_message('drop_constraint', Drop_constraint_args, :req => req)
+     end
+ 
+     def recv_drop_constraint()
+       result = receive_message(Drop_constraint_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def add_primary_key(req)
+       send_add_primary_key(req)
+       recv_add_primary_key()
+     end
+ 
+     def send_add_primary_key(req)
+       send_message('add_primary_key', Add_primary_key_args, :req => req)
+     end
+ 
+     def recv_add_primary_key()
+       result = receive_message(Add_primary_key_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def add_foreign_key(req)
+       send_add_foreign_key(req)
+       recv_add_foreign_key()
+     end
+ 
+     def send_add_foreign_key(req)
+       send_message('add_foreign_key', Add_foreign_key_args, :req => req)
+     end
+ 
+     def recv_add_foreign_key()
+       result = receive_message(Add_foreign_key_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def add_unique_constraint(req)
+       send_add_unique_constraint(req)
+       recv_add_unique_constraint()
+     end
+ 
+     def send_add_unique_constraint(req)
+       send_message('add_unique_constraint', Add_unique_constraint_args, :req => req)
+     end
+ 
+     def recv_add_unique_constraint()
+       result = receive_message(Add_unique_constraint_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def add_not_null_constraint(req)
+       send_add_not_null_constraint(req)
+       recv_add_not_null_constraint()
+     end
+ 
+     def send_add_not_null_constraint(req)
+       send_message('add_not_null_constraint', Add_not_null_constraint_args, :req => req)
+     end
+ 
+     def recv_add_not_null_constraint()
+       result = receive_message(Add_not_null_constraint_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def add_default_constraint(req)
+       send_add_default_constraint(req)
+       recv_add_default_constraint()
+     end
+ 
+     def send_add_default_constraint(req)
+       send_message('add_default_constraint', Add_default_constraint_args, :req => req)
+     end
+ 
+     def recv_add_default_constraint()
+       result = receive_message(Add_default_constraint_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def add_check_constraint(req)
+       send_add_check_constraint(req)
+       recv_add_check_constraint()
+     end
+ 
+     def send_add_check_constraint(req)
+       send_message('add_check_constraint', Add_check_constraint_args, :req => req)
+     end
+ 
+     def recv_add_check_constraint()
+       result = receive_message(Add_check_constraint_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def drop_table(dbname, name, deleteData)
+       send_drop_table(dbname, name, deleteData)
+       recv_drop_table()
+     end
+ 
+     def send_drop_table(dbname, name, deleteData)
+       send_message('drop_table', Drop_table_args, :dbname => dbname, :name => name, :deleteData => deleteData)
+     end
+ 
+     def recv_drop_table()
+       result = receive_message(Drop_table_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def drop_table_with_environment_context(dbname, name, deleteData, environment_context)
+       send_drop_table_with_environment_context(dbname, name, deleteData, environment_context)
+       recv_drop_table_with_environment_context()
+     end
+ 
+     def send_drop_table_with_environment_context(dbname, name, deleteData, environment_context)
+       send_message('drop_table_with_environment_context', Drop_table_with_environment_context_args, :dbname => dbname, :name => name, :deleteData => deleteData, :environment_context => environment_context)
+     end
+ 
+     def recv_drop_table_with_environment_context()
+       result = receive_message(Drop_table_with_environment_context_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def truncate_table(dbName, tableName, partNames)
+       send_truncate_table(dbName, tableName, partNames)
+       recv_truncate_table()
+     end
+ 
+     def send_truncate_table(dbName, tableName, partNames)
+       send_message('truncate_table', Truncate_table_args, :dbName => dbName, :tableName => tableName, :partNames => partNames)
+     end
+ 
+     def recv_truncate_table()
+       result = receive_message(Truncate_table_result)
+       raise result.o1 unless result.o1.nil?
+       return
+     end
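+ 
+     # partNames selects which partitions to truncate; passing nil is
+     # commonly used to truncate the whole table (an assumption about
+     # server-side semantics, not something this generated client enforces).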
+ 
+     def get_tables(db_name, pattern)
+       send_get_tables(db_name, pattern)
+       return recv_get_tables()
+     end
+ 
+     def send_get_tables(db_name, pattern)
+       send_message('get_tables', Get_tables_args, :db_name => db_name, :pattern => pattern)
+     end
+ 
+     def recv_get_tables()
+       result = receive_message(Get_tables_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_tables failed: unknown result')
+     end
+ 
+     def get_tables_by_type(db_name, pattern, tableType)
+       send_get_tables_by_type(db_name, pattern, tableType)
+       return recv_get_tables_by_type()
+     end
+ 
+     def send_get_tables_by_type(db_name, pattern, tableType)
+       send_message('get_tables_by_type', Get_tables_by_type_args, :db_name => db_name, :pattern => pattern, :tableType => tableType)
+     end
+ 
+     def recv_get_tables_by_type()
+       result = receive_message(Get_tables_by_type_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_tables_by_type failed: unknown result')
+     end
+ 
+     def get_materialized_views_for_rewriting(db_name)
+       send_get_materialized_views_for_rewriting(db_name)
+       return recv_get_materialized_views_for_rewriting()
+     end
+ 
+     def send_get_materialized_views_for_rewriting(db_name)
+       send_message('get_materialized_views_for_rewriting', Get_materialized_views_for_rewriting_args, :db_name => db_name)
+     end
+ 
+     def recv_get_materialized_views_for_rewriting()
+       result = receive_message(Get_materialized_views_for_rewriting_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_materialized_views_for_rewriting failed: unknown result')
+     end
+ 
+     def get_table_meta(db_patterns, tbl_patterns, tbl_types)
+       send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
+       return recv_get_table_meta()
+     end
+ 
+     def send_get_table_meta(db_patterns, tbl_patterns, tbl_types)
+       send_message('get_table_meta', Get_table_meta_args, :db_patterns => db_patterns, :tbl_patterns => tbl_patterns, :tbl_types => tbl_types)
+     end
+ 
+     def recv_get_table_meta()
+       result = receive_message(Get_table_meta_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_meta failed: unknown result')
+     end
+ 
+     def get_all_tables(db_name)
+       send_get_all_tables(db_name)
+       return recv_get_all_tables()
+     end
+ 
+     def send_get_all_tables(db_name)
+       send_message('get_all_tables', Get_all_tables_args, :db_name => db_name)
+     end
+ 
+     def recv_get_all_tables()
+       result = receive_message(Get_all_tables_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_all_tables failed: unknown result')
+     end
+ 
+     def get_table(dbname, tbl_name)
+       send_get_table(dbname, tbl_name)
+       return recv_get_table()
+     end
+ 
+     def send_get_table(dbname, tbl_name)
+       send_message('get_table', Get_table_args, :dbname => dbname, :tbl_name => tbl_name)
+     end
+ 
+     def recv_get_table()
+       result = receive_message(Get_table_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table failed: unknown result')
+     end
+ 
+     def get_table_objects_by_name(dbname, tbl_names)
+       send_get_table_objects_by_name(dbname, tbl_names)
+       return recv_get_table_objects_by_name()
+     end
+ 
+     def send_get_table_objects_by_name(dbname, tbl_names)
+       send_message('get_table_objects_by_name', Get_table_objects_by_name_args, :dbname => dbname, :tbl_names => tbl_names)
+     end
+ 
+     def recv_get_table_objects_by_name()
+       result = receive_message(Get_table_objects_by_name_result)
+       return result.success unless result.success.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_objects_by_name failed: unknown result')
+     end
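+ 
+     # Unlike most getters, this legacy call declares no exceptions, so
+     # server-side failures surface only as transport errors or the
+     # MISSING_RESULT ApplicationException above; the _req variant further
+     # down adds typed o1..o3 exceptions.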
+ 
+     def get_table_req(req)
+       send_get_table_req(req)
+       return recv_get_table_req()
+     end
+ 
+     def send_get_table_req(req)
+       send_message('get_table_req', Get_table_req_args, :req => req)
+     end
+ 
+     def recv_get_table_req()
+       result = receive_message(Get_table_req_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_req failed: unknown result')
+     end
+ 
+     def get_table_objects_by_name_req(req)
+       send_get_table_objects_by_name_req(req)
+       return recv_get_table_objects_by_name_req()
+     end
+ 
+     def send_get_table_objects_by_name_req(req)
+       send_message('get_table_objects_by_name_req', Get_table_objects_by_name_req_args, :req => req)
+     end
+ 
+     def recv_get_table_objects_by_name_req()
+       result = receive_message(Get_table_objects_by_name_req_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_objects_by_name_req failed: unknown result')
+     end
+ 
+     def get_materialization_invalidation_info(dbname, tbl_names)
+       send_get_materialization_invalidation_info(dbname, tbl_names)
+       return recv_get_materialization_invalidation_info()
+     end
+ 
+     def send_get_materialization_invalidation_info(dbname, tbl_names)
+       send_message('get_materialization_invalidation_info', Get_materialization_invalidation_info_args, :dbname => dbname, :tbl_names => tbl_names)
+     end
+ 
+     def recv_get_materialization_invalidation_info()
+       result = receive_message(Get_materialization_invalidation_info_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_materialization_invalidation_info failed: unknown result')
+     end
+ 
+     def update_creation_metadata(catName, dbname, tbl_name, creation_metadata)
+       send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata)
+       recv_update_creation_metadata()
+     end
+ 
+     def send_update_creation_metadata(catName, dbname, tbl_name, creation_metadata)
+       send_message('update_creation_metadata', Update_creation_metadata_args, :catName => catName, :dbname => dbname, :tbl_name => tbl_name, :creation_metadata => creation_metadata)
+     end
+ 
+     def recv_update_creation_metadata()
+       result = receive_message(Update_creation_metadata_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def get_table_names_by_filter(dbname, filter, max_tables)
+       send_get_table_names_by_filter(dbname, filter, max_tables)
+       return recv_get_table_names_by_filter()
+     end
+ 
+     def send_get_table_names_by_filter(dbname, filter, max_tables)
+       send_message('get_table_names_by_filter', Get_table_names_by_filter_args, :dbname => dbname, :filter => filter, :max_tables => max_tables)
+     end
+ 
+     def recv_get_table_names_by_filter()
+       result = receive_message(Get_table_names_by_filter_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_names_by_filter failed: unknown result')
+     end
+ 
+     def alter_table(dbname, tbl_name, new_tbl)
+       send_alter_table(dbname, tbl_name, new_tbl)
+       recv_alter_table()
+     end
+ 
+     def send_alter_table(dbname, tbl_name, new_tbl)
+       send_message('alter_table', Alter_table_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl)
+     end
+ 
+     def recv_alter_table()
+       result = receive_message(Alter_table_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context)
+       send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context)
+       recv_alter_table_with_environment_context()
+     end
+ 
+     def send_alter_table_with_environment_context(dbname, tbl_name, new_tbl, environment_context)
+       send_message('alter_table_with_environment_context', Alter_table_with_environment_context_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl, :environment_context => environment_context)
+     end
+ 
+     def recv_alter_table_with_environment_context()
+       result = receive_message(Alter_table_with_environment_context_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade)
+       send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade)
+       recv_alter_table_with_cascade()
+     end
+ 
+     def send_alter_table_with_cascade(dbname, tbl_name, new_tbl, cascade)
+       send_message('alter_table_with_cascade', Alter_table_with_cascade_args, :dbname => dbname, :tbl_name => tbl_name, :new_tbl => new_tbl, :cascade => cascade)
+     end
+ 
+     def recv_alter_table_with_cascade()
+       result = receive_message(Alter_table_with_cascade_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def add_partition(new_part)
+       send_add_partition(new_part)
+       return recv_add_partition()
+     end
+ 
+     def send_add_partition(new_part)
+       send_message('add_partition', Add_partition_args, :new_part => new_part)
+     end
+ 
+     def recv_add_partition()
+       result = receive_message(Add_partition_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partition failed: unknown result')
+     end
+ 
+     def add_partition_with_environment_context(new_part, environment_context)
+       send_add_partition_with_environment_context(new_part, environment_context)
+       return recv_add_partition_with_environment_context()
+     end
+ 
+     def send_add_partition_with_environment_context(new_part, environment_context)
+       send_message('add_partition_with_environment_context', Add_partition_with_environment_context_args, :new_part => new_part, :environment_context => environment_context)
+     end
+ 
+     def recv_add_partition_with_environment_context()
+       result = receive_message(Add_partition_with_environment_context_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partition_with_environment_context failed: unknown result')
+     end
+ 
+     def add_partitions(new_parts)
+       send_add_partitions(new_parts)
+       return recv_add_partitions()
+     end
+ 
+     def send_add_partitions(new_parts)
+       send_message('add_partitions', Add_partitions_args, :new_parts => new_parts)
+     end
+ 
+     def recv_add_partitions()
+       result = receive_message(Add_partitions_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partitions failed: unknown result')
+     end
+ 
+     def add_partitions_pspec(new_parts)
+       send_add_partitions_pspec(new_parts)
+       return recv_add_partitions_pspec()
+     end
+ 
+     def send_add_partitions_pspec(new_parts)
+       send_message('add_partitions_pspec', Add_partitions_pspec_args, :new_parts => new_parts)
+     end
+ 
+     def recv_add_partitions_pspec()
+       result = receive_message(Add_partitions_pspec_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partitions_pspec failed: unknown result')
+     end
+ 
+     def append_partition(db_name, tbl_name, part_vals)
+       send_append_partition(db_name, tbl_name, part_vals)
+       return recv_append_partition()
+     end
+ 
+     def send_append_partition(db_name, tbl_name, part_vals)
+       send_message('append_partition', Append_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals)
+     end
+ 
+     def recv_append_partition()
+       result = receive_message(Append_partition_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'append_partition failed: unknown result')
+     end
+ 
+     def add_partitions_req(request)
+       send_add_partitions_req(request)
+       return recv_add_partitions_req()
+     end
+ 
+     def send_add_partitions_req(request)
+       send_message('add_partitions_req', Add_partitions_req_args, :request => request)
+     end
+ 
+     def recv_add_partitions_req()
+       result = receive_message(Add_partitions_req_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'add_partitions_req failed: unknown result')
+     end
+ 
+     def append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context)
+       send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context)
+       return recv_append_partition_with_environment_context()
+     end
+ 
+     def send_append_partition_with_environment_context(db_name, tbl_name, part_vals, environment_context)
+       send_message('append_partition_with_environment_context', Append_partition_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :environment_context => environment_context)
+     end
+ 
+     def recv_append_partition_with_environment_context()
+       result = receive_message(Append_partition_with_environment_context_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'append_partition_with_environment_context failed: unknown result')
+     end
+ 
+     def append_partition_by_name(db_name, tbl_name, part_name)
+       send_append_partition_by_name(db_name, tbl_name, part_name)
+       return recv_append_partition_by_name()
+     end
+ 
+     def send_append_partition_by_name(db_name, tbl_name, part_name)
+       send_message('append_partition_by_name', Append_partition_by_name_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name)
+     end
+ 
+     def recv_append_partition_by_name()
+       result = receive_message(Append_partition_by_name_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'append_partition_by_name failed: unknown result')
+     end
+ 
+     def append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context)
+       send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context)
+       return recv_append_partition_by_name_with_environment_context()
+     end
+ 
+     def send_append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, environment_context)
+       send_message('append_partition_by_name_with_environment_context', Append_partition_by_name_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :environment_context => environment_context)
+     end
+ 
+     def recv_append_partition_by_name_with_environment_context()
+       result = receive_message(Append_partition_by_name_with_environment_context_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'append_partition_by_name_with_environment_context failed: unknown result')
+     end
+ 
+     def drop_partition(db_name, tbl_name, part_vals, deleteData)
+       send_drop_partition(db_name, tbl_name, part_vals, deleteData)
+       return recv_drop_partition()
+     end
+ 
+     def send_drop_partition(db_name, tbl_name, part_vals, deleteData)
+       send_message('drop_partition', Drop_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :deleteData => deleteData)
+     end
+ 
+     def recv_drop_partition()
+       result = receive_message(Drop_partition_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partition failed: unknown result')
+     end
+ 
+     def drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context)
+       send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context)
+       return recv_drop_partition_with_environment_context()
+     end
+ 
+     def send_drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, environment_context)
+       send_message('drop_partition_with_environment_context', Drop_partition_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :deleteData => deleteData, :environment_context => environment_context)
+     end
+ 
+     def recv_drop_partition_with_environment_context()
+       result = receive_message(Drop_partition_with_environment_context_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partition_with_environment_context failed: unknown result')
+     end
+ 
+     def drop_partition_by_name(db_name, tbl_name, part_name, deleteData)
+       send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData)
+       return recv_drop_partition_by_name()
+     end
+ 
+     def send_drop_partition_by_name(db_name, tbl_name, part_name, deleteData)
+       send_message('drop_partition_by_name', Drop_partition_by_name_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :deleteData => deleteData)
+     end
+ 
+     def recv_drop_partition_by_name()
+       result = receive_message(Drop_partition_by_name_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partition_by_name failed: unknown result')
+     end
+ 
+     def drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context)
+       send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context)
+       return recv_drop_partition_by_name_with_environment_context()
+     end
+ 
+     def send_drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, deleteData, environment_context)
+       send_message('drop_partition_by_name_with_environment_context', Drop_partition_by_name_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :deleteData => deleteData, :environment_context => environment_context)
+     end
+ 
+     def recv_drop_partition_by_name_with_environment_context()
+       result = receive_message(Drop_partition_by_name_with_environment_context_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partition_by_name_with_environment_context failed: unknown result')
+     end
+ 
+     def drop_partitions_req(req)
+       send_drop_partitions_req(req)
+       return recv_drop_partitions_req()
+     end
+ 
+     def send_drop_partitions_req(req)
+       send_message('drop_partitions_req', Drop_partitions_req_args, :req => req)
+     end
+ 
+     def recv_drop_partitions_req()
+       result = receive_message(Drop_partitions_req_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_partitions_req failed: unknown result')
+     end
+ 
+     def get_partition(db_name, tbl_name, part_vals)
+       send_get_partition(db_name, tbl_name, part_vals)
+       return recv_get_partition()
+     end
+ 
+     def send_get_partition(db_name, tbl_name, part_vals)
+       send_message('get_partition', Get_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals)
+     end
+ 
+     def recv_get_partition()
+       result = receive_message(Get_partition_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition failed: unknown result')
+     end
+ 
+     def exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+       send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+       return recv_exchange_partition()
+     end
+ 
+     def send_exchange_partition(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+       send_message('exchange_partition', Exchange_partition_args, :partitionSpecs => partitionSpecs, :source_db => source_db, :source_table_name => source_table_name, :dest_db => dest_db, :dest_table_name => dest_table_name)
+     end
+ 
+     def recv_exchange_partition()
+       result = receive_message(Exchange_partition_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partition failed: unknown result')
+     end
+ 
+     def exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+       send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+       return recv_exchange_partitions()
+     end
+ 
+     def send_exchange_partitions(partitionSpecs, source_db, source_table_name, dest_db, dest_table_name)
+       send_message('exchange_partitions', Exchange_partitions_args, :partitionSpecs => partitionSpecs, :source_db => source_db, :source_table_name => source_table_name, :dest_db => dest_db, :dest_table_name => dest_table_name)
+     end
+ 
+     def recv_exchange_partitions()
+       result = receive_message(Exchange_partitions_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'exchange_partitions failed: unknown result')
+     end
+ 
+     def get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names)
+       send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names)
+       return recv_get_partition_with_auth()
+     end
+ 
+     def send_get_partition_with_auth(db_name, tbl_name, part_vals, user_name, group_names)
+       send_message('get_partition_with_auth', Get_partition_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :user_name => user_name, :group_names => group_names)
+     end
+ 
+     def recv_get_partition_with_auth()
+       result = receive_message(Get_partition_with_auth_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_with_auth failed: unknown result')
+     end
+ 
+     def get_partition_by_name(db_name, tbl_name, part_name)
+       send_get_partition_by_name(db_name, tbl_name, part_name)
+       return recv_get_partition_by_name()
+     end
+ 
+     def send_get_partition_by_name(db_name, tbl_name, part_name)
+       send_message('get_partition_by_name', Get_partition_by_name_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name)
+     end
+ 
+     def recv_get_partition_by_name()
+       result = receive_message(Get_partition_by_name_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_by_name failed: unknown result')
+     end
+ 
+     def get_partitions(db_name, tbl_name, max_parts)
+       send_get_partitions(db_name, tbl_name, max_parts)
+       return recv_get_partitions()
+     end
+ 
+     def send_get_partitions(db_name, tbl_name, max_parts)
+       send_message('get_partitions', Get_partitions_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts)
+     end
+ 
+     def recv_get_partitions()
+       result = receive_message(Get_partitions_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions failed: unknown result')
+     end
+ 
+     def get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names)
+       send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names)
+       return recv_get_partitions_with_auth()
+     end
+ 
+     def send_get_partitions_with_auth(db_name, tbl_name, max_parts, user_name, group_names)
+       send_message('get_partitions_with_auth', Get_partitions_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts, :user_name => user_name, :group_names => group_names)
+     end
+ 
+     def recv_get_partitions_with_auth()
+       result = receive_message(Get_partitions_with_auth_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_with_auth failed: unknown result')
+     end
+ 
+     def get_partitions_pspec(db_name, tbl_name, max_parts)
+       send_get_partitions_pspec(db_name, tbl_name, max_parts)
+       return recv_get_partitions_pspec()
+     end
+ 
+     def send_get_partitions_pspec(db_name, tbl_name, max_parts)
+       send_message('get_partitions_pspec', Get_partitions_pspec_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts)
+     end
+ 
+     def recv_get_partitions_pspec()
+       result = receive_message(Get_partitions_pspec_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_pspec failed: unknown result')
+     end
+ 
+     def get_partition_names(db_name, tbl_name, max_parts)
+       send_get_partition_names(db_name, tbl_name, max_parts)
+       return recv_get_partition_names()
+     end
+ 
+     def send_get_partition_names(db_name, tbl_name, max_parts)
+       send_message('get_partition_names', Get_partition_names_args, :db_name => db_name, :tbl_name => tbl_name, :max_parts => max_parts)
+     end
+ 
+     def recv_get_partition_names()
+       result = receive_message(Get_partition_names_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_names failed: unknown result')
+     end
+ 
+     def get_partition_values(request)
+       send_get_partition_values(request)
+       return recv_get_partition_values()
+     end
+ 
+     def send_get_partition_values(request)
+       send_message('get_partition_values', Get_partition_values_args, :request => request)
+     end
+ 
+     def recv_get_partition_values()
+       result = receive_message(Get_partition_values_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_values failed: unknown result')
+     end
+ 
+     def get_partitions_ps(db_name, tbl_name, part_vals, max_parts)
+       send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts)
+       return recv_get_partitions_ps()
+     end
+ 
+     def send_get_partitions_ps(db_name, tbl_name, part_vals, max_parts)
+       send_message('get_partitions_ps', Get_partitions_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts)
+     end
+ 
+     def recv_get_partitions_ps()
+       result = receive_message(Get_partitions_ps_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_ps failed: unknown result')
+     end
+ 
+     def get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names)
+       send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names)
+       return recv_get_partitions_ps_with_auth()
+     end
+ 
+     def send_get_partitions_ps_with_auth(db_name, tbl_name, part_vals, max_parts, user_name, group_names)
+       send_message('get_partitions_ps_with_auth', Get_partitions_ps_with_auth_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts, :user_name => user_name, :group_names => group_names)
+     end
+ 
+     def recv_get_partitions_ps_with_auth()
+       result = receive_message(Get_partitions_ps_with_auth_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_ps_with_auth failed: unknown result')
+     end
+ 
+     def get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)
+       send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)
+       return recv_get_partition_names_ps()
+     end
+ 
+     def send_get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)
+       send_message('get_partition_names_ps', Get_partition_names_ps_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :max_parts => max_parts)
+     end
+ 
+     def recv_get_partition_names_ps()
+       result = receive_message(Get_partition_names_ps_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_names_ps failed: unknown result')
+     end
+ 
+     def get_partitions_by_filter(db_name, tbl_name, filter, max_parts)
+       send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts)
+       return recv_get_partitions_by_filter()
+     end
+ 
+     def send_get_partitions_by_filter(db_name, tbl_name, filter, max_parts)
+       send_message('get_partitions_by_filter', Get_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts)
+     end
+ 
+     def recv_get_partitions_by_filter()
+       result = receive_message(Get_partitions_by_filter_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_filter failed: unknown result')
+     end
+ 
+     def get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)
+       send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)
+       return recv_get_part_specs_by_filter()
+     end
+ 
+     def send_get_part_specs_by_filter(db_name, tbl_name, filter, max_parts)
+       send_message('get_part_specs_by_filter', Get_part_specs_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter, :max_parts => max_parts)
+     end
+ 
+     def recv_get_part_specs_by_filter()
+       result = receive_message(Get_part_specs_by_filter_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_part_specs_by_filter failed: unknown result')
+     end
+ 
+     def get_partitions_by_expr(req)
+       send_get_partitions_by_expr(req)
+       return recv_get_partitions_by_expr()
+     end
+ 
+     def send_get_partitions_by_expr(req)
+       send_message('get_partitions_by_expr', Get_partitions_by_expr_args, :req => req)
+     end
+ 
+     def recv_get_partitions_by_expr()
+       result = receive_message(Get_partitions_by_expr_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_expr failed: unknown result')
+     end
+ 
+     def get_num_partitions_by_filter(db_name, tbl_name, filter)
+       send_get_num_partitions_by_filter(db_name, tbl_name, filter)
+       return recv_get_num_partitions_by_filter()
+     end
+ 
+     def send_get_num_partitions_by_filter(db_name, tbl_name, filter)
+       send_message('get_num_partitions_by_filter', Get_num_partitions_by_filter_args, :db_name => db_name, :tbl_name => tbl_name, :filter => filter)
+     end
+ 
+     def recv_get_num_partitions_by_filter()
+       result = receive_message(Get_num_partitions_by_filter_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_num_partitions_by_filter failed: unknown result')
+     end
+ 
+     def get_partitions_by_names(db_name, tbl_name, names)
+       send_get_partitions_by_names(db_name, tbl_name, names)
+       return recv_get_partitions_by_names()
+     end
+ 
+     def send_get_partitions_by_names(db_name, tbl_name, names)
+       send_message('get_partitions_by_names', Get_partitions_by_names_args, :db_name => db_name, :tbl_name => tbl_name, :names => names)
+     end
+ 
+     def recv_get_partitions_by_names()
+       result = receive_message(Get_partitions_by_names_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_by_names failed: unknown result')
+     end
+ 
+     def alter_partition(db_name, tbl_name, new_part)
+       send_alter_partition(db_name, tbl_name, new_part)
+       recv_alter_partition()
+     end
+ 
+     def send_alter_partition(db_name, tbl_name, new_part)
+       send_message('alter_partition', Alter_partition_args, :db_name => db_name, :tbl_name => tbl_name, :new_part => new_part)
+     end
+ 
+     def recv_alter_partition()
+       result = receive_message(Alter_partition_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def alter_partitions(db_name, tbl_name, new_parts)
+       send_alter_partitions(db_name, tbl_name, new_parts)
+       recv_alter_partitions()
+     end
+ 
+     def send_alter_partitions(db_name, tbl_name, new_parts)
+       send_message('alter_partitions', Alter_partitions_args, :db_name => db_name, :tbl_name => tbl_name, :new_parts => new_parts)
+     end
+ 
+     def recv_alter_partitions()
+       result = receive_message(Alter_partitions_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
+       send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
+       recv_alter_partitions_with_environment_context()
+     end
+ 
+     def send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
+       send_message('alter_partitions_with_environment_context', Alter_partitions_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :new_parts => new_parts, :environment_context => environment_context)
+     end
+ 
+     def recv_alter_partitions_with_environment_context()
+       result = receive_message(Alter_partitions_with_environment_context_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
++    def alter_partitions_with_environment_context_req(req)
++      send_alter_partitions_with_environment_context_req(req)
++      return recv_alter_partitions_with_environment_context_req()
++    end
++
++    def send_alter_partitions_with_environment_context_req(req)
++      send_message('alter_partitions_with_environment_context_req', Alter_partitions_with_environment_context_req_args, :req => req)
++    end
++
++    def recv_alter_partitions_with_environment_context_req()
++      result = receive_message(Alter_partitions_with_environment_context_req_result)
++      return result.success unless result.success.nil?
++      raise result.o1 unless result.o1.nil?
++      raise result.o2 unless result.o2.nil?
++      raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'alter_partitions_with_environment_context_req failed: unknown result')
++    end
++
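+     # alter_partitions_with_environment_context_req is the request-struct
+     # form of the call above: it bundles its arguments into a single req
+     # object and, unlike the void variants, returns a result (note the
+     # MISSING_RESULT check in recv_). A hedged sketch, assuming req is the
+     # request struct generated alongside this client:
+     #
+     #   resp = client.alter_partitions_with_environment_context_req(req)
+ 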
+     def alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
+       send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
+       recv_alter_partition_with_environment_context()
+     end
+ 
+     def send_alter_partition_with_environment_context(db_name, tbl_name, new_part, environment_context)
+       send_message('alter_partition_with_environment_context', Alter_partition_with_environment_context_args, :db_name => db_name, :tbl_name => tbl_name, :new_part => new_part, :environment_context => environment_context)
+     end
+ 
+     def recv_alter_partition_with_environment_context()
+       result = receive_message(Alter_partition_with_environment_context_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def rename_partition(db_name, tbl_name, part_vals, new_part)
+       send_rename_partition(db_name, tbl_name, part_vals, new_part)
+       recv_rename_partition()
+     end
+ 
+     def send_rename_partition(db_name, tbl_name, part_vals, new_part)
+       send_message('rename_partition', Rename_partition_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :new_part => new_part)
+     end
+ 
+     def recv_rename_partition()
+       result = receive_message(Rename_partition_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def partition_name_has_valid_characters(part_vals, throw_exception)
+       send_partition_name_has_valid_characters(part_vals, throw_exception)
+       return recv_partition_name_has_valid_characters()
+     end
+ 
+     def send_partition_name_has_valid_characters(part_vals, throw_exception)
+       send_message('partition_name_has_valid_characters', Partition_name_has_valid_characters_args, :part_vals => part_vals, :throw_exception => throw_exception)
+     end
+ 
+     def recv_partition_name_has_valid_characters()
+       result = receive_message(Partition_name_has_valid_characters_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'partition_name_has_valid_characters failed: unknown result')
+     end
+ 
+     def get_config_value(name, defaultValue)
+       send_get_config_value(name, defaultValue)
+       return recv_get_config_value()
+     end
+ 
+     def send_get_config_value(name, defaultValue)
+       send_message('get_config_value', Get_config_value_args, :name => name, :defaultValue => defaultValue)
+     end
+ 
+     def recv_get_config_value()
+       result = receive_message(Get_config_value_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_config_value failed: unknown result')
+     end
+ 
+     def partition_name_to_vals(part_name)
+       send_partition_name_to_vals(part_name)
+       return recv_partition_name_to_vals()
+     end
+ 
+     def send_partition_name_to_vals(part_name)
+       send_message('partition_name_to_vals', Partition_name_to_vals_args, :part_name => part_name)
+     end
+ 
+     def recv_partition_name_to_vals()
+       result = receive_message(Partition_name_to_vals_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'partition_name_to_vals failed: unknown result')
+     end
+ 
+     def partition_name_to_spec(part_name)
+       send_partition_name_to_spec(part_name)
+       return recv_partition_name_to_spec()
+     end
+ 
+     def send_partition_name_to_spec(part_name)
+       send_message('partition_name_to_spec', Partition_name_to_spec_args, :part_name => part_name)
+     end
+ 
+     def recv_partition_name_to_spec()
+       result = receive_message(Partition_name_to_spec_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'partition_name_to_spec failed: unknown result')
+     end
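+ 
+     # partition_name_to_vals and partition_name_to_spec are helper RPCs
+     # for Hive's 'key=value/key=value' partition-name encoding, e.g.
+     # (values are illustrative):
+     #
+     #   client.partition_name_to_vals('ds=2018-07-13/hr=01')  # => ['2018-07-13', '01']
+     #   client.partition_name_to_spec('ds=2018-07-13/hr=01')  # => {'ds' => '2018-07-13', 'hr' => '01'}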
+ 
+     def markPartitionForEvent(db_name, tbl_name, part_vals, eventType)
+       send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType)
+       recv_markPartitionForEvent()
+     end
+ 
+     def send_markPartitionForEvent(db_name, tbl_name, part_vals, eventType)
+       send_message('markPartitionForEvent', MarkPartitionForEvent_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :eventType => eventType)
+     end
+ 
+     def recv_markPartitionForEvent()
+       result = receive_message(MarkPartitionForEvent_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise result.o5 unless result.o5.nil?
+       raise result.o6 unless result.o6.nil?
+       return
+     end
+ 
+     def isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType)
+       send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType)
+       return recv_isPartitionMarkedForEvent()
+     end
+ 
+     def send_isPartitionMarkedForEvent(db_name, tbl_name, part_vals, eventType)
+       send_message('isPartitionMarkedForEvent', IsPartitionMarkedForEvent_args, :db_name => db_name, :tbl_name => tbl_name, :part_vals => part_vals, :eventType => eventType)
+     end
+ 
+     def recv_isPartitionMarkedForEvent()
+       result = receive_message(IsPartitionMarkedForEvent_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise result.o5 unless result.o5.nil?
+       raise result.o6 unless result.o6.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'isPartitionMarkedForEvent failed: unknown result')
+     end
+ 
+     def get_primary_keys(request)
+       send_get_primary_keys(request)
+       return recv_get_primary_keys()
+     end
+ 
+     def send_get_primary_keys(request)
+       send_message('get_primary_keys', Get_primary_keys_args, :request => request)
+     end
+ 
+     def recv_get_primary_keys()
+       result = receive_message(Get_primary_keys_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_primary_keys failed: unknown result')
+     end
+ 
+     def get_foreign_keys(request)
+       send_get_foreign_keys(request)
+       return recv_get_foreign_keys()
+     end
+ 
+     def send_get_foreign_keys(request)
+       send_message('get_foreign_keys', Get_foreign_keys_args, :request => request)
+     end
+ 
+     def recv_get_foreign_keys()
+       result = receive_message(Get_foreign_keys_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_foreign_keys failed: unknown result')
+     end
+ 
+     def get_unique_constraints(request)
+       send_get_unique_constraints(request)
+       return recv_get_unique_constraints()
+     end
+ 
+     def send_get_unique_constraints(request)
+       send_message('get_unique_constraints', Get_unique_constraints_args, :request => request)
+     end
+ 
+     def recv_get_unique_constraints()
+       result = receive_message(Get_unique_constraints_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_unique_constraints failed: unknown result')
+     end
+ 
+     def get_not_null_constraints(request)
+       send_get_not_null_constraints(request)
+       return recv_get_not_null_constraints()
+     end
+ 
+     def send_get_not_null_constraints(request)
+       send_message('get_not_null_constraints', Get_not_null_constraints_args, :request => request)
+     end
+ 
+     def recv_get_not_null_constraints()
+       result = receive_message(Get_not_null_constraints_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_not_null_constraints failed: unknown result')
+     end
+ 
+     def get_default_constraints(request)
+       send_get_default_constraints(request)
+       return recv_get_default_constraints()
+     end
+ 
+     def send_get_default_constraints(request)
+       send_message('get_default_constraints', Get_default_constraints_args, :request => request)
+     end
+ 
+     def recv_get_default_constraints()
+       result = receive_message(Get_default_constraints_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_default_constraints failed: unknown result')
+     end
+ 
+     def get_check_constraints(request)
+       send_get_check_constraints(request)
+       return recv_get_check_constraints()
+     end
+ 
+     def send_get_check_constraints(request)
+       send_message('get_check_constraints', Get_check_constraints_args, :request => request)
+     end
+ 
+     def recv_get_check_constraints()
+       result = receive_message(Get_check_constraints_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_check_constraints failed: unknown result')
+     end
+ 
+     def update_table_column_statistics(stats_obj)
+       send_update_table_column_statistics(stats_obj)
+       return recv_update_table_column_statistics()
+     end
+ 
+     def send_update_table_column_statistics(stats_obj)
+       send_message('update_table_column_statistics', Update_table_column_statistics_args, :stats_obj => stats_obj)
+     end
+ 
+     def recv_update_table_column_statistics()
+       result = receive_message(Update_table_column_statistics_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_table_column_statistics failed: unknown result')
+     end
+ 
+     def update_partition_column_statistics(stats_obj)
+       send_update_partition_column_statistics(stats_obj)
+       return recv_update_partition_column_statistics()
+     end
+ 
+     def send_update_partition_column_statistics(stats_obj)
+       send_message('update_partition_column_statistics', Update_partition_column_statistics_args, :stats_obj => stats_obj)
+     end
+ 
+     def recv_update_partition_column_statistics()
+       result = receive_message(Update_partition_column_statistics_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'update_partition_column_statistics failed: unknown result')
+     end
+ 
+     def get_table_column_statistics(db_name, tbl_name, col_name)
+       send_get_table_column_statistics(db_name, tbl_name, col_name)
+       return recv_get_table_column_statistics()
+     end
+ 
+     def send_get_table_column_statistics(db_name, tbl_name, col_name)
+       send_message('get_table_column_statistics', Get_table_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :col_name => col_name)
+     end
+ 
+     def recv_get_table_column_statistics()
+       result = receive_message(Get_table_column_statistics_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_column_statistics failed: unknown result')
+     end
+ 
+     def get_partition_column_statistics(db_name, tbl_name, part_name, col_name)
+       send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name)
+       return recv_get_partition_column_statistics()
+     end
+ 
+     def send_get_partition_column_statistics(db_name, tbl_name, part_name, col_name)
+       send_message('get_partition_column_statistics', Get_partition_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :col_name => col_name)
+     end
+ 
+     def recv_get_partition_column_statistics()
+       result = receive_message(Get_partition_column_statistics_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partition_column_statistics failed: unknown result')
+     end
+ 
+     def get_table_statistics_req(request)
+       send_get_table_statistics_req(request)
+       return recv_get_table_statistics_req()
+     end
+ 
+     def send_get_table_statistics_req(request)
+       send_message('get_table_statistics_req', Get_table_statistics_req_args, :request => request)
+     end
+ 
+     def recv_get_table_statistics_req()
+       result = receive_message(Get_table_statistics_req_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_table_statistics_req failed: unknown result')
+     end
+ 
+     def get_partitions_statistics_req(request)
+       send_get_partitions_statistics_req(request)
+       return recv_get_partitions_statistics_req()
+     end
+ 
+     def send_get_partitions_statistics_req(request)
+       send_message('get_partitions_statistics_req', Get_partitions_statistics_req_args, :request => request)
+     end
+ 
+     def recv_get_partitions_statistics_req()
+       result = receive_message(Get_partitions_statistics_req_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_partitions_statistics_req failed: unknown result')
+     end
+ 
+     def get_aggr_stats_for(request)
+       send_get_aggr_stats_for(request)
+       return recv_get_aggr_stats_for()
+     end
+ 
+     def send_get_aggr_stats_for(request)
+       send_message('get_aggr_stats_for', Get_aggr_stats_for_args, :request => request)
+     end
+ 
+     def recv_get_aggr_stats_for()
+       result = receive_message(Get_aggr_stats_for_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_aggr_stats_for failed: unknown result')
+     end
+ 
+     def set_aggr_stats_for(request)
+       send_set_aggr_stats_for(request)
+       return recv_set_aggr_stats_for()
+     end
+ 
+     def send_set_aggr_stats_for(request)
+       send_message('set_aggr_stats_for', Set_aggr_stats_for_args, :request => request)
+     end
+ 
+     def recv_set_aggr_stats_for()
+       result = receive_message(Set_aggr_stats_for_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'set_aggr_stats_for failed: unknown result')
+     end
+ 
+     def delete_partition_column_statistics(db_name, tbl_name, part_name, col_name)
+       send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name)
+       return recv_delete_partition_column_statistics()
+     end
+ 
+     def send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name)
+       send_message('delete_partition_column_statistics', Delete_partition_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :part_name => part_name, :col_name => col_name)
+     end
+ 
+     def recv_delete_partition_column_statistics()
+       result = receive_message(Delete_partition_column_statistics_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'delete_partition_column_statistics failed: unknown result')
+     end
+ 
+     def delete_table_column_statistics(db_name, tbl_name, col_name)
+       send_delete_table_column_statistics(db_name, tbl_name, col_name)
+       return recv_delete_table_column_statistics()
+     end
+ 
+     def send_delete_table_column_statistics(db_name, tbl_name, col_name)
+       send_message('delete_table_column_statistics', Delete_table_column_statistics_args, :db_name => db_name, :tbl_name => tbl_name, :col_name => col_name)
+     end
+ 
+     def recv_delete_table_column_statistics()
+       result = receive_message(Delete_table_column_statistics_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'delete_table_column_statistics failed: unknown result')
+     end
+ 
+     def create_function(func)
+       send_create_function(func)
+       recv_create_function()
+     end
+ 
+     def send_create_function(func)
+       send_message('create_function', Create_function_args, :func => func)
+     end
+ 
+     def recv_create_function()
+       result = receive_message(Create_function_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise result.o3 unless result.o3.nil?
+       raise result.o4 unless result.o4.nil?
+       return
+     end
+ 
+     def drop_function(dbName, funcName)
+       send_drop_function(dbName, funcName)
+       recv_drop_function()
+     end
+ 
+     def send_drop_function(dbName, funcName)
+       send_message('drop_function', Drop_function_args, :dbName => dbName, :funcName => funcName)
+     end
+ 
+     def recv_drop_function()
+       result = receive_message(Drop_function_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o3 unless result.o3.nil?
+       return
+     end
+ 
+     def alter_function(dbName, funcName, newFunc)
+       send_alter_function(dbName, funcName, newFunc)
+       recv_alter_function()
+     end
+ 
+     def send_alter_function(dbName, funcName, newFunc)
+       send_message('alter_function', Alter_function_args, :dbName => dbName, :funcName => funcName, :newFunc => newFunc)
+     end
+ 
+     def recv_alter_function()
+       result = receive_message(Alter_function_result)
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       return
+     end
+ 
+     def get_functions(dbName, pattern)
+       send_get_functions(dbName, pattern)
+       return recv_get_functions()
+     end
+ 
+     def send_get_functions(dbName, pattern)
+       send_message('get_functions', Get_functions_args, :dbName => dbName, :pattern => pattern)
+     end
+ 
+     def recv_get_functions()
+       result = receive_message(Get_functions_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_functions failed: unknown result')
+     end
+ 
+     def get_function(dbName, funcName)
+       send_get_function(dbName, funcName)
+       return recv_get_function()
+     end
+ 
+     def send_get_function(dbName, funcName)
+       send_message('get_function', Get_function_args, :dbName => dbName, :funcName => funcName)
+     end
+ 
+     def recv_get_function()
+       result = receive_message(Get_function_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise result.o2 unless result.o2.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_function failed: unknown result')
+     end
+ 
+     def get_all_functions()
+       send_get_all_functions()
+       return recv_get_all_functions()
+     end
+ 
+     def send_get_all_functions()
+       send_message('get_all_functions', Get_all_functions_args)
+     end
+ 
+     def recv_get_all_functions()
+       result = receive_message(Get_all_functions_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_all_functions failed: unknown result')
+     end
+ 
+     def create_role(role)
+       send_create_role(role)
+       return recv_create_role()
+     end
+ 
+     def send_create_role(role)
+       send_message('create_role', Create_role_args, :role => role)
+     end
+ 
+     def recv_create_role()
+       result = receive_message(Create_role_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'create_role failed: unknown result')
+     end
+ 
+     def drop_role(role_name)
+       send_drop_role(role_name)
+       return recv_drop_role()
+     end
+ 
+     def send_drop_role(role_name)
+       send_message('drop_role', Drop_role_args, :role_name => role_name)
+     end
+ 
+     def recv_drop_role()
+       result = receive_message(Drop_role_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'drop_role failed: unknown result')
+     end
+ 
+     def get_role_names()
+       send_get_role_names()
+       return recv_get_role_names()
+     end
+ 
+     def send_get_role_names()
+       send_message('get_role_names', Get_role_names_args)
+     end
+ 
+     def recv_get_role_names()
+       result = receive_message(Get_role_names_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_role_names failed: unknown result')
+     end
+ 
+     def grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option)
+       send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option)
+       return recv_grant_role()
+     end
+ 
+     def send_grant_role(role_name, principal_name, principal_type, grantor, grantorType, grant_option)
+       send_message('grant_role', Grant_role_args, :role_name => role_name, :principal_name => principal_name, :principal_type => principal_type, :grantor => grantor, :grantorType => grantorType, :grant_option => grant_option)
+     end
+ 
+     def recv_grant_role()
+       result = receive_message(Grant_role_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'grant_role failed: unknown result')
+     end
+ 
+     def revoke_role(role_name, principal_name, principal_type)
+       send_revoke_role(role_name, principal_name, principal_type)
+       return recv_revoke_role()
+     end
+ 
+     def send_revoke_role(role_name, principal_name, principal_type)
+       send_message('revoke_role', Revoke_role_args, :role_name => role_name, :principal_name => principal_name, :principal_type => principal_type)
+     end
+ 
+     def recv_revoke_role()
+       result = receive_message(Revoke_role_result)
+       return result.success unless result.success.nil?
+       raise result.o1 unless result.o1.nil?
+       raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'revoke_role failed: unknown result')
+     end

<TRUNCATED>
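
Every RPC in this generated Ruby client follows the same three-method shape: foo(args) delegates to send_foo, which writes a Foo_args struct onto the wire, and recv_foo, which reads the matching Foo_result, returns result.success when it is set, re-raises whichever declared exception field (o1, o2, ...) came back, and otherwise raises a MISSING_RESULT ApplicationException. As a rough sketch of what driving this API looks like from the Java side -- assuming the parallel ThriftHiveMetastore.Client generated from the same IDL, and a metastore reachable on the conventional localhost:9083 -- a call such as get_role_names reduces to:

    import java.util.List;
    import org.apache.thrift.protocol.TBinaryProtocol;
    import org.apache.thrift.transport.TSocket;
    import org.apache.thrift.transport.TTransport;
    import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

    public class RoleNamesExample {
      public static void main(String[] args) throws Exception {
        // Host and port are assumptions; 9083 is the conventional metastore port.
        TTransport transport = new TSocket("localhost", 9083);
        transport.open();
        try {
          ThriftHiveMetastore.Client client =
              new ThriftHiveMetastore.Client(new TBinaryProtocol(transport));
          // Mirrors the Ruby client's send_get_role_names/recv_get_role_names pair:
          // serialize the args struct, read the result struct, return success.
          List<String> roles = client.get_role_names();
          roles.forEach(System.out::println);
        } finally {
          transport.close();
        }
      }
    }

The same pattern applies to every method above; only the args/result structs and the set of declared exception fields differ.
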

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
new file mode 100644
index 0000000..fa760ed
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ResourceUri.java
@@ -0,0 +1,511 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ResourceUri implements org.apache.thrift.TBase<ResourceUri, ResourceUri._Fields>, java.io.Serializable, Cloneable, Comparable<ResourceUri> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ResourceUri");
+
+  private static final org.apache.thrift.protocol.TField RESOURCE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("resourceType", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField URI_FIELD_DESC = new org.apache.thrift.protocol.TField("uri", org.apache.thrift.protocol.TType.STRING, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new ResourceUriStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new ResourceUriTupleSchemeFactory());
+  }
+
+  private ResourceType resourceType; // required
+  private String uri; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    /**
+     * 
+     * @see ResourceType
+     */
+    RESOURCE_TYPE((short)1, "resourceType"),
+    URI((short)2, "uri");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // RESOURCE_TYPE
+          return RESOURCE_TYPE;
+        case 2: // URI
+          return URI;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.RESOURCE_TYPE, new org.apache.thrift.meta_data.FieldMetaData("resourceType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ResourceType.class)));
+    tmpMap.put(_Fields.URI, new org.apache.thrift.meta_data.FieldMetaData("uri", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ResourceUri.class, metaDataMap);
+  }
+
+  public ResourceUri() {
+  }
+
+  public ResourceUri(
+    ResourceType resourceType,
+    String uri)
+  {
+    this();
+    this.resourceType = resourceType;
+    this.uri = uri;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public ResourceUri(ResourceUri other) {
+    if (other.isSetResourceType()) {
+      this.resourceType = other.resourceType;
+    }
+    if (other.isSetUri()) {
+      this.uri = other.uri;
+    }
+  }
+
+  public ResourceUri deepCopy() {
+    return new ResourceUri(this);
+  }
+
+  @Override
+  public void clear() {
+    this.resourceType = null;
+    this.uri = null;
+  }
+
+  /**
+   * 
+   * @see ResourceType
+   */
+  public ResourceType getResourceType() {
+    return this.resourceType;
+  }
+
+  /**
+   * 
+   * @see ResourceType
+   */
+  public void setResourceType(ResourceType resourceType) {
+    this.resourceType = resourceType;
+  }
+
+  public void unsetResourceType() {
+    this.resourceType = null;
+  }
+
+  /** Returns true if field resourceType is set (has been assigned a value) and false otherwise */
+  public boolean isSetResourceType() {
+    return this.resourceType != null;
+  }
+
+  public void setResourceTypeIsSet(boolean value) {
+    if (!value) {
+      this.resourceType = null;
+    }
+  }
+
+  public String getUri() {
+    return this.uri;
+  }
+
+  public void setUri(String uri) {
+    this.uri = uri;
+  }
+
+  public void unsetUri() {
+    this.uri = null;
+  }
+
+  /** Returns true if field uri is set (has been assigned a value) and false otherwise */
+  public boolean isSetUri() {
+    return this.uri != null;
+  }
+
+  public void setUriIsSet(boolean value) {
+    if (!value) {
+      this.uri = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case RESOURCE_TYPE:
+      if (value == null) {
+        unsetResourceType();
+      } else {
+        setResourceType((ResourceType)value);
+      }
+      break;
+
+    case URI:
+      if (value == null) {
+        unsetUri();
+      } else {
+        setUri((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case RESOURCE_TYPE:
+      return getResourceType();
+
+    case URI:
+      return getUri();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case RESOURCE_TYPE:
+      return isSetResourceType();
+    case URI:
+      return isSetUri();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof ResourceUri)
+      return this.equals((ResourceUri)that);
+    return false;
+  }
+
+  public boolean equals(ResourceUri that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_resourceType = true && this.isSetResourceType();
+    boolean that_present_resourceType = true && that.isSetResourceType();
+    if (this_present_resourceType || that_present_resourceType) {
+      if (!(this_present_resourceType && that_present_resourceType))
+        return false;
+      if (!this.resourceType.equals(that.resourceType))
+        return false;
+    }
+
+    boolean this_present_uri = true && this.isSetUri();
+    boolean that_present_uri = true && that.isSetUri();
+    if (this_present_uri || that_present_uri) {
+      if (!(this_present_uri && that_present_uri))
+        return false;
+      if (!this.uri.equals(that.uri))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_resourceType = true && (isSetResourceType());
+    list.add(present_resourceType);
+    if (present_resourceType)
+      list.add(resourceType.getValue());
+
+    boolean present_uri = true && (isSetUri());
+    list.add(present_uri);
+    if (present_uri)
+      list.add(uri);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(ResourceUri other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetResourceType()).compareTo(other.isSetResourceType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetResourceType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourceType, other.resourceType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetUri()).compareTo(other.isSetUri());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetUri()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uri, other.uri);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("ResourceUri(");
+    boolean first = true;
+
+    sb.append("resourceType:");
+    if (this.resourceType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.resourceType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("uri:");
+    if (this.uri == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.uri);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class ResourceUriStandardSchemeFactory implements SchemeFactory {
+    public ResourceUriStandardScheme getScheme() {
+      return new ResourceUriStandardScheme();
+    }
+  }
+
+  private static class ResourceUriStandardScheme extends StandardScheme<ResourceUri> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, ResourceUri struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // RESOURCE_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.resourceType = org.apache.hadoop.hive.metastore.api.ResourceType.findByValue(iprot.readI32());
+              struct.setResourceTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // URI
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.uri = iprot.readString();
+              struct.setUriIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, ResourceUri struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.resourceType != null) {
+        oprot.writeFieldBegin(RESOURCE_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.resourceType.getValue());
+        oprot.writeFieldEnd();
+      }
+      if (struct.uri != null) {
+        oprot.writeFieldBegin(URI_FIELD_DESC);
+        oprot.writeString(struct.uri);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class ResourceUriTupleSchemeFactory implements SchemeFactory {
+    public ResourceUriTupleScheme getScheme() {
+      return new ResourceUriTupleScheme();
+    }
+  }
+
+  private static class ResourceUriTupleScheme extends TupleScheme<ResourceUri> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, ResourceUri struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetResourceType()) {
+        optionals.set(0);
+      }
+      if (struct.isSetUri()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetResourceType()) {
+        oprot.writeI32(struct.resourceType.getValue());
+      }
+      if (struct.isSetUri()) {
+        oprot.writeString(struct.uri);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, ResourceUri struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.resourceType = org.apache.hadoop.hive.metastore.api.ResourceType.findByValue(iprot.readI32());
+        struct.setResourceTypeIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.uri = iprot.readString();
+        struct.setUriIsSet(true);
+      }
+    }
+  }
+
+}
+
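
ResourceUri is a plain two-field struct (an enum plus a string), and its private writeObject/readObject hooks route Java serialization through TCompactProtocol instead of default field-by-field serialization. A minimal round trip through Thrift's TSerializer/TDeserializer with that same compact protocol -- the constructor and equals semantics are exactly the ones generated above; ResourceType.JAR is assumed to be one of the enum's constants:

    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.TSerializer;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.hadoop.hive.metastore.api.ResourceType;
    import org.apache.hadoop.hive.metastore.api.ResourceUri;

    public class ResourceUriRoundTrip {
      public static void main(String[] args) throws Exception {
        // ResourceType.JAR is assumed here; the URI is an arbitrary example.
        ResourceUri original =
            new ResourceUri(ResourceType.JAR, "hdfs:///tmp/udfs/my-udf.jar");

        // Same compact protocol the struct's writeObject/readObject hooks use.
        byte[] bytes =
            new TSerializer(new TCompactProtocol.Factory()).serialize(original);

        ResourceUri copy = new ResourceUri();
        new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

        System.out.println(copy);                  // ResourceUri(resourceType:JAR, uri:...)
        System.out.println(original.equals(copy)); // true: field-wise equals generated above
      }
    }
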

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
new file mode 100644
index 0000000..326eba0
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Role.java
@@ -0,0 +1,601 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class Role implements org.apache.thrift.TBase<Role, Role._Fields>, java.io.Serializable, Cloneable, Comparable<Role> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("Role");
+
+  private static final org.apache.thrift.protocol.TField ROLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("roleName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField OWNER_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("ownerName", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new RoleStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new RoleTupleSchemeFactory());
+  }
+
+  private String roleName; // required
+  private int createTime; // required
+  private String ownerName; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    ROLE_NAME((short)1, "roleName"),
+    CREATE_TIME((short)2, "createTime"),
+    OWNER_NAME((short)3, "ownerName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // ROLE_NAME
+          return ROLE_NAME;
+        case 2: // CREATE_TIME
+          return CREATE_TIME;
+        case 3: // OWNER_NAME
+          return OWNER_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __CREATETIME_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.ROLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("roleName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.OWNER_NAME, new org.apache.thrift.meta_data.FieldMetaData("ownerName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Role.class, metaDataMap);
+  }
+
+  public Role() {
+  }
+
+  public Role(
+    String roleName,
+    int createTime,
+    String ownerName)
+  {
+    this();
+    this.roleName = roleName;
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+    this.ownerName = ownerName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public Role(Role other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetRoleName()) {
+      this.roleName = other.roleName;
+    }
+    this.createTime = other.createTime;
+    if (other.isSetOwnerName()) {
+      this.ownerName = other.ownerName;
+    }
+  }
+
+  public Role deepCopy() {
+    return new Role(this);
+  }
+
+  @Override
+  public void clear() {
+    this.roleName = null;
+    setCreateTimeIsSet(false);
+    this.createTime = 0;
+    this.ownerName = null;
+  }
+
+  public String getRoleName() {
+    return this.roleName;
+  }
+
+  public void setRoleName(String roleName) {
+    this.roleName = roleName;
+  }
+
+  public void unsetRoleName() {
+    this.roleName = null;
+  }
+
+  /** Returns true if field roleName is set (has been assigned a value) and false otherwise */
+  public boolean isSetRoleName() {
+    return this.roleName != null;
+  }
+
+  public void setRoleNameIsSet(boolean value) {
+    if (!value) {
+      this.roleName = null;
+    }
+  }
+
+  public int getCreateTime() {
+    return this.createTime;
+  }
+
+  public void setCreateTime(int createTime) {
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+  }
+
+  public void unsetCreateTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  /** Returns true if field createTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetCreateTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  public void setCreateTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
+  }
+
+  public String getOwnerName() {
+    return this.ownerName;
+  }
+
+  public void setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+  }
+
+  public void unsetOwnerName() {
+    this.ownerName = null;
+  }
+
+  /** Returns true if field ownerName is set (has been assigned a value) and false otherwise */
+  public boolean isSetOwnerName() {
+    return this.ownerName != null;
+  }
+
+  public void setOwnerNameIsSet(boolean value) {
+    if (!value) {
+      this.ownerName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case ROLE_NAME:
+      if (value == null) {
+        unsetRoleName();
+      } else {
+        setRoleName((String)value);
+      }
+      break;
+
+    case CREATE_TIME:
+      if (value == null) {
+        unsetCreateTime();
+      } else {
+        setCreateTime((Integer)value);
+      }
+      break;
+
+    case OWNER_NAME:
+      if (value == null) {
+        unsetOwnerName();
+      } else {
+        setOwnerName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case ROLE_NAME:
+      return getRoleName();
+
+    case CREATE_TIME:
+      return getCreateTime();
+
+    case OWNER_NAME:
+      return getOwnerName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case ROLE_NAME:
+      return isSetRoleName();
+    case CREATE_TIME:
+      return isSetCreateTime();
+    case OWNER_NAME:
+      return isSetOwnerName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof Role)
+      return this.equals((Role)that);
+    return false;
+  }
+
+  public boolean equals(Role that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_roleName = true && this.isSetRoleName();
+    boolean that_present_roleName = true && that.isSetRoleName();
+    if (this_present_roleName || that_present_roleName) {
+      if (!(this_present_roleName && that_present_roleName))
+        return false;
+      if (!this.roleName.equals(that.roleName))
+        return false;
+    }
+
+    boolean this_present_createTime = true;
+    boolean that_present_createTime = true;
+    if (this_present_createTime || that_present_createTime) {
+      if (!(this_present_createTime && that_present_createTime))
+        return false;
+      if (this.createTime != that.createTime)
+        return false;
+    }
+
+    boolean this_present_ownerName = true && this.isSetOwnerName();
+    boolean that_present_ownerName = true && that.isSetOwnerName();
+    if (this_present_ownerName || that_present_ownerName) {
+      if (!(this_present_ownerName && that_present_ownerName))
+        return false;
+      if (!this.ownerName.equals(that.ownerName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_roleName = true && (isSetRoleName());
+    list.add(present_roleName);
+    if (present_roleName)
+      list.add(roleName);
+
+    boolean present_createTime = true;
+    list.add(present_createTime);
+    if (present_createTime)
+      list.add(createTime);
+
+    boolean present_ownerName = true && (isSetOwnerName());
+    list.add(present_ownerName);
+    if (present_ownerName)
+      list.add(ownerName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(Role other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetRoleName()).compareTo(other.isSetRoleName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRoleName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.roleName, other.roleName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCreateTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOwnerName()).compareTo(other.isSetOwnerName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOwnerName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ownerName, other.ownerName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("Role(");
+    boolean first = true;
+
+    sb.append("roleName:");
+    if (this.roleName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.roleName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("createTime:");
+    sb.append(this.createTime);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("ownerName:");
+    if (this.ownerName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.ownerName);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class RoleStandardSchemeFactory implements SchemeFactory {
+    public RoleStandardScheme getScheme() {
+      return new RoleStandardScheme();
+    }
+  }
+
+  private static class RoleStandardScheme extends StandardScheme<Role> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, Role struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // ROLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.roleName = iprot.readString();
+              struct.setRoleNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // CREATE_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.createTime = iprot.readI32();
+              struct.setCreateTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // OWNER_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.ownerName = iprot.readString();
+              struct.setOwnerNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, Role struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.roleName != null) {
+        oprot.writeFieldBegin(ROLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.roleName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
+      oprot.writeI32(struct.createTime);
+      oprot.writeFieldEnd();
+      if (struct.ownerName != null) {
+        oprot.writeFieldBegin(OWNER_NAME_FIELD_DESC);
+        oprot.writeString(struct.ownerName);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class RoleTupleSchemeFactory implements SchemeFactory {
+    public RoleTupleScheme getScheme() {
+      return new RoleTupleScheme();
+    }
+  }
+
+  private static class RoleTupleScheme extends TupleScheme<Role> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, Role struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetRoleName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetCreateTime()) {
+        optionals.set(1);
+      }
+      if (struct.isSetOwnerName()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetRoleName()) {
+        oprot.writeString(struct.roleName);
+      }
+      if (struct.isSetCreateTime()) {
+        oprot.writeI32(struct.createTime);
+      }
+      if (struct.isSetOwnerName()) {
+        oprot.writeString(struct.ownerName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, Role struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.roleName = iprot.readString();
+        struct.setRoleNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.createTime = iprot.readI32();
+        struct.setCreateTimeIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.ownerName = iprot.readString();
+        struct.setOwnerNameIsSet(true);
+      }
+    }
+  }
+
+}
+
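
One detail worth noting in Role: the two String fields track their "set" state by nullness, but the primitive int createTime cannot, so the generator adds an __isset_bitfield with EncodingUtils bit twiddling, and the three-argument constructor explicitly calls setCreateTimeIsSet(true). A short sketch of the resulting behavior (the timestamp is just an arbitrary epoch-seconds example):

    import org.apache.hadoop.hive.metastore.api.Role;

    public class RoleIssetDemo {
      public static void main(String[] args) {
        Role role = new Role();
        // Object fields report "set" via null checks; the primitive needs the bitfield.
        System.out.println(role.isSetRoleName());   // false
        System.out.println(role.isSetCreateTime()); // false, even though createTime == 0

        role.setCreateTime(0);                      // value unchanged...
        System.out.println(role.isSetCreateTime()); // ...but now true: the bit is flipped

        // The three-arg constructor marks createTime as set, per the generated code above.
        Role admin = new Role("admin", 1531440000, "hive");
        System.out.println(admin.isSetCreateTime()); // true
      }
    }
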

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
new file mode 100644
index 0000000..f9408d9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RolePrincipalGrant.java
@@ -0,0 +1,1035 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class RolePrincipalGrant implements org.apache.thrift.TBase<RolePrincipalGrant, RolePrincipalGrant._Fields>, java.io.Serializable, Cloneable, Comparable<RolePrincipalGrant> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("RolePrincipalGrant");
+
+  private static final org.apache.thrift.protocol.TField ROLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("roleName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("principalName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PRINCIPAL_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("principalType", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField GRANT_OPTION_FIELD_DESC = new org.apache.thrift.protocol.TField("grantOption", org.apache.thrift.protocol.TType.BOOL, (short)4);
+  private static final org.apache.thrift.protocol.TField GRANT_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("grantTime", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField GRANTOR_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("grantorName", org.apache.thrift.protocol.TType.STRING, (short)6);
+  private static final org.apache.thrift.protocol.TField GRANTOR_PRINCIPAL_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("grantorPrincipalType", org.apache.thrift.protocol.TType.I32, (short)7);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new RolePrincipalGrantStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new RolePrincipalGrantTupleSchemeFactory());
+  }
+
+  private String roleName; // required
+  private String principalName; // required
+  private PrincipalType principalType; // required
+  private boolean grantOption; // required
+  private int grantTime; // required
+  private String grantorName; // required
+  private PrincipalType grantorPrincipalType; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    ROLE_NAME((short)1, "roleName"),
+    PRINCIPAL_NAME((short)2, "principalName"),
+    /**
+     * 
+     * @see PrincipalType
+     */
+    PRINCIPAL_TYPE((short)3, "principalType"),
+    GRANT_OPTION((short)4, "grantOption"),
+    GRANT_TIME((short)5, "grantTime"),
+    GRANTOR_NAME((short)6, "grantorName"),
+    /**
+     * 
+     * @see PrincipalType
+     */
+    GRANTOR_PRINCIPAL_TYPE((short)7, "grantorPrincipalType");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // ROLE_NAME
+          return ROLE_NAME;
+        case 2: // PRINCIPAL_NAME
+          return PRINCIPAL_NAME;
+        case 3: // PRINCIPAL_TYPE
+          return PRINCIPAL_TYPE;
+        case 4: // GRANT_OPTION
+          return GRANT_OPTION;
+        case 5: // GRANT_TIME
+          return GRANT_TIME;
+        case 6: // GRANTOR_NAME
+          return GRANTOR_NAME;
+        case 7: // GRANTOR_PRINCIPAL_TYPE
+          return GRANTOR_PRINCIPAL_TYPE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __GRANTOPTION_ISSET_ID = 0;
+  private static final int __GRANTTIME_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.ROLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("roleName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PRINCIPAL_NAME, new org.apache.thrift.meta_data.FieldMetaData("principalName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PRINCIPAL_TYPE, new org.apache.thrift.meta_data.FieldMetaData("principalType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+    tmpMap.put(_Fields.GRANT_OPTION, new org.apache.thrift.meta_data.FieldMetaData("grantOption", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.GRANT_TIME, new org.apache.thrift.meta_data.FieldMetaData("grantTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.GRANTOR_NAME, new org.apache.thrift.meta_data.FieldMetaData("grantorName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.GRANTOR_PRINCIPAL_TYPE, new org.apache.thrift.meta_data.FieldMetaData("grantorPrincipalType", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, PrincipalType.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(RolePrincipalGrant.class, metaDataMap);
+  }
+
+  public RolePrincipalGrant() {
+  }
+
+  public RolePrincipalGrant(
+    String roleName,
+    String principalName,
+    PrincipalType principalType,
+    boolean grantOption,
+    int grantTime,
+    String grantorName,
+    PrincipalType grantorPrincipalType)
+  {
+    this();
+    this.roleName = roleName;
+    this.principalName = principalName;
+    this.principalType = principalType;
+    this.grantOption = grantOption;
+    setGrantOptionIsSet(true);
+    this.grantTime = grantTime;
+    setGrantTimeIsSet(true);
+    this.grantorName = grantorName;
+    this.grantorPrincipalType = grantorPrincipalType;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public RolePrincipalGrant(RolePrincipalGrant other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetRoleName()) {
+      this.roleName = other.roleName;
+    }
+    if (other.isSetPrincipalName()) {
+      this.principalName = other.principalName;
+    }
+    if (other.isSetPrincipalType()) {
+      this.principalType = other.principalType;
+    }
+    this.grantOption = other.grantOption;
+    this.grantTime = other.grantTime;
+    if (other.isSetGrantorName()) {
+      this.grantorName = other.grantorName;
+    }
+    if (other.isSetGrantorPrincipalType()) {
+      this.grantorPrincipalType = other.grantorPrincipalType;
+    }
+  }
+
+  public RolePrincipalGrant deepCopy() {
+    return new RolePrincipalGrant(this);
+  }
+
+  @Override
+  public void clear() {
+    this.roleName = null;
+    this.principalName = null;
+    this.principalType = null;
+    setGrantOptionIsSet(false);
+    this.grantOption = false;
+    setGrantTimeIsSet(false);
+    this.grantTime = 0;
+    this.grantorName = null;
+    this.grantorPrincipalType = null;
+  }
+
+  public String getRoleName() {
+    return this.roleName;
+  }
+
+  public void setRoleName(String roleName) {
+    this.roleName = roleName;
+  }
+
+  public void unsetRoleName() {
+    this.roleName = null;
+  }
+
+  /** Returns true if field roleName is set (has been assigned a value) and false otherwise */
+  public boolean isSetRoleName() {
+    return this.roleName != null;
+  }
+
+  public void setRoleNameIsSet(boolean value) {
+    if (!value) {
+      this.roleName = null;
+    }
+  }
+
+  public String getPrincipalName() {
+    return this.principalName;
+  }
+
+  public void setPrincipalName(String principalName) {
+    this.principalName = principalName;
+  }
+
+  public void unsetPrincipalName() {
+    this.principalName = null;
+  }
+
+  /** Returns true if field principalName is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipalName() {
+    return this.principalName != null;
+  }
+
+  public void setPrincipalNameIsSet(boolean value) {
+    if (!value) {
+      this.principalName = null;
+    }
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public PrincipalType getPrincipalType() {
+    return this.principalType;
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public void setPrincipalType(PrincipalType principalType) {
+    this.principalType = principalType;
+  }
+
+  public void unsetPrincipalType() {
+    this.principalType = null;
+  }
+
+  /** Returns true if field principalType is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrincipalType() {
+    return this.principalType != null;
+  }
+
+  public void setPrincipalTypeIsSet(boolean value) {
+    if (!value) {
+      this.principalType = null;
+    }
+  }
+
+  public boolean isGrantOption() {
+    return this.grantOption;
+  }
+
+  public void setGrantOption(boolean grantOption) {
+    this.grantOption = grantOption;
+    setGrantOptionIsSet(true);
+  }
+
+  public void unsetGrantOption() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __GRANTOPTION_ISSET_ID);
+  }
+
+  /** Returns true if field grantOption is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantOption() {
+    return EncodingUtils.testBit(__isset_bitfield, __GRANTOPTION_ISSET_ID);
+  }
+
+  public void setGrantOptionIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __GRANTOPTION_ISSET_ID, value);
+  }
+
+  public int getGrantTime() {
+    return this.grantTime;
+  }
+
+  public void setGrantTime(int grantTime) {
+    this.grantTime = grantTime;
+    setGrantTimeIsSet(true);
+  }
+
+  public void unsetGrantTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __GRANTTIME_ISSET_ID);
+  }
+
+  /** Returns true if field grantTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __GRANTTIME_ISSET_ID);
+  }
+
+  public void setGrantTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __GRANTTIME_ISSET_ID, value);
+  }
+
+  public String getGrantorName() {
+    return this.grantorName;
+  }
+
+  public void setGrantorName(String grantorName) {
+    this.grantorName = grantorName;
+  }
+
+  public void unsetGrantorName() {
+    this.grantorName = null;
+  }
+
+  /** Returns true if field grantorName is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantorName() {
+    return this.grantorName != null;
+  }
+
+  public void setGrantorNameIsSet(boolean value) {
+    if (!value) {
+      this.grantorName = null;
+    }
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public PrincipalType getGrantorPrincipalType() {
+    return this.grantorPrincipalType;
+  }
+
+  /**
+   * 
+   * @see PrincipalType
+   */
+  public void setGrantorPrincipalType(PrincipalType grantorPrincipalType) {
+    this.grantorPrincipalType = grantorPrincipalType;
+  }
+
+  public void unsetGrantorPrincipalType() {
+    this.grantorPrincipalType = null;
+  }
+
+  /** Returns true if field grantorPrincipalType is set (has been assigned a value) and false otherwise */
+  public boolean isSetGrantorPrincipalType() {
+    return this.grantorPrincipalType != null;
+  }
+
+  public void setGrantorPrincipalTypeIsSet(boolean value) {
+    if (!value) {
+      this.grantorPrincipalType = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case ROLE_NAME:
+      if (value == null) {
+        unsetRoleName();
+      } else {
+        setRoleName((String)value);
+      }
+      break;
+
+    case PRINCIPAL_NAME:
+      if (value == null) {
+        unsetPrincipalName();
+      } else {
+        setPrincipalName((String)value);
+      }
+      break;
+
+    case PRINCIPAL_TYPE:
+      if (value == null) {
+        unsetPrincipalType();
+      } else {
+        setPrincipalType((PrincipalType)value);
+      }
+      break;
+
+    case GRANT_OPTION:
+      if (value == null) {
+        unsetGrantOption();
+      } else {
+        setGrantOption((Boolean)value);
+      }
+      break;
+
+    case GRANT_TIME:
+      if (value == null) {
+        unsetGrantTime();
+      } else {
+        setGrantTime((Integer)value);
+      }
+      break;
+
+    case GRANTOR_NAME:
+      if (value == null) {
+        unsetGrantorName();
+      } else {
+        setGrantorName((String)value);
+      }
+      break;
+
+    case GRANTOR_PRINCIPAL_TYPE:
+      if (value == null) {
+        unsetGrantorPrincipalType();
+      } else {
+        setGrantorPrincipalType((PrincipalType)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case ROLE_NAME:
+      return getRoleName();
+
+    case PRINCIPAL_NAME:
+      return getPrincipalName();
+
+    case PRINCIPAL_TYPE:
+      return getPrincipalType();
+
+    case GRANT_OPTION:
+      return isGrantOption();
+
+    case GRANT_TIME:
+      return getGrantTime();
+
+    case GRANTOR_NAME:
+      return getGrantorName();
+
+    case GRANTOR_PRINCIPAL_TYPE:
+      return getGrantorPrincipalType();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case ROLE_NAME:
+      return isSetRoleName();
+    case PRINCIPAL_NAME:
+      return isSetPrincipalName();
+    case PRINCIPAL_TYPE:
+      return isSetPrincipalType();
+    case GRANT_OPTION:
+      return isSetGrantOption();
+    case GRANT_TIME:
+      return isSetGrantTime();
+    case GRANTOR_NAME:
+      return isSetGrantorName();
+    case GRANTOR_PRINCIPAL_TYPE:
+      return isSetGrantorPrincipalType();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof RolePrincipalGrant)
+      return this.equals((RolePrincipalGrant)that);
+    return false;
+  }
+
+  public boolean equals(RolePrincipalGrant that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_roleName = true && this.isSetRoleName();
+    boolean that_present_roleName = true && that.isSetRoleName();
+    if (this_present_roleName || that_present_roleName) {
+      if (!(this_present_roleName && that_present_roleName))
+        return false;
+      if (!this.roleName.equals(that.roleName))
+        return false;
+    }
+
+    boolean this_present_principalName = true && this.isSetPrincipalName();
+    boolean that_present_principalName = true && that.isSetPrincipalName();
+    if (this_present_principalName || that_present_principalName) {
+      if (!(this_present_principalName && that_present_principalName))
+        return false;
+      if (!this.principalName.equals(that.principalName))
+        return false;
+    }
+
+    boolean this_present_principalType = true && this.isSetPrincipalType();
+    boolean that_present_principalType = true && that.isSetPrincipalType();
+    if (this_present_principalType || that_present_principalType) {
+      if (!(this_present_principalType && that_present_principalType))
+        return false;
+      if (!this.principalType.equals(that.principalType))
+        return false;
+    }
+
+    boolean this_present_grantOption = true;
+    boolean that_present_grantOption = true;
+    if (this_present_grantOption || that_present_grantOption) {
+      if (!(this_present_grantOption && that_present_grantOption))
+        return false;
+      if (this.grantOption != that.grantOption)
+        return false;
+    }
+
+    boolean this_present_grantTime = true;
+    boolean that_present_grantTime = true;
+    if (this_present_grantTime || that_present_grantTime) {
+      if (!(this_present_grantTime && that_present_grantTime))
+        return false;
+      if (this.grantTime != that.grantTime)
+        return false;
+    }
+
+    boolean this_present_grantorName = true && this.isSetGrantorName();
+    boolean that_present_grantorName = true && that.isSetGrantorName();
+    if (this_present_grantorName || that_present_grantorName) {
+      if (!(this_present_grantorName && that_present_grantorName))
+        return false;
+      if (!this.grantorName.equals(that.grantorName))
+        return false;
+    }
+
+    boolean this_present_grantorPrincipalType = true && this.isSetGrantorPrincipalType();
+    boolean that_present_grantorPrincipalType = true && that.isSetGrantorPrincipalType();
+    if (this_present_grantorPrincipalType || that_present_grantorPrincipalType) {
+      if (!(this_present_grantorPrincipalType && that_present_grantorPrincipalType))
+        return false;
+      if (!this.grantorPrincipalType.equals(that.grantorPrincipalType))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_roleName = true && (isSetRoleName());
+    list.add(present_roleName);
+    if (present_roleName)
+      list.add(roleName);
+
+    boolean present_principalName = true && (isSetPrincipalName());
+    list.add(present_principalName);
+    if (present_principalName)
+      list.add(principalName);
+
+    boolean present_principalType = true && (isSetPrincipalType());
+    list.add(present_principalType);
+    if (present_principalType)
+      list.add(principalType.getValue());
+
+    boolean present_grantOption = true;
+    list.add(present_grantOption);
+    if (present_grantOption)
+      list.add(grantOption);
+
+    boolean present_grantTime = true;
+    list.add(present_grantTime);
+    if (present_grantTime)
+      list.add(grantTime);
+
+    boolean present_grantorName = true && (isSetGrantorName());
+    list.add(present_grantorName);
+    if (present_grantorName)
+      list.add(grantorName);
+
+    boolean present_grantorPrincipalType = true && (isSetGrantorPrincipalType());
+    list.add(present_grantorPrincipalType);
+    if (present_grantorPrincipalType)
+      list.add(grantorPrincipalType.getValue());
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(RolePrincipalGrant other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetRoleName()).compareTo(other.isSetRoleName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRoleName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.roleName, other.roleName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrincipalName()).compareTo(other.isSetPrincipalName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipalName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principalName, other.principalName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrincipalType()).compareTo(other.isSetPrincipalType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrincipalType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.principalType, other.principalType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantOption()).compareTo(other.isSetGrantOption());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantOption()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantOption, other.grantOption);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantTime()).compareTo(other.isSetGrantTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantTime, other.grantTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantorName()).compareTo(other.isSetGrantorName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantorName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantorName, other.grantorName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetGrantorPrincipalType()).compareTo(other.isSetGrantorPrincipalType());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetGrantorPrincipalType()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.grantorPrincipalType, other.grantorPrincipalType);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("RolePrincipalGrant(");
+    boolean first = true;
+
+    sb.append("roleName:");
+    if (this.roleName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.roleName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("principalName:");
+    if (this.principalName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principalName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("principalType:");
+    if (this.principalType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.principalType);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("grantOption:");
+    sb.append(this.grantOption);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("grantTime:");
+    sb.append(this.grantTime);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("grantorName:");
+    if (this.grantorName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.grantorName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("grantorPrincipalType:");
+    if (this.grantorPrincipalType == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.grantorPrincipalType);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class RolePrincipalGrantStandardSchemeFactory implements SchemeFactory {
+    public RolePrincipalGrantStandardScheme getScheme() {
+      return new RolePrincipalGrantStandardScheme();
+    }
+  }
+
+  private static class RolePrincipalGrantStandardScheme extends StandardScheme<RolePrincipalGrant> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, RolePrincipalGrant struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // ROLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.roleName = iprot.readString();
+              struct.setRoleNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PRINCIPAL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.principalName = iprot.readString();
+              struct.setPrincipalNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PRINCIPAL_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.principalType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+              struct.setPrincipalTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // GRANT_OPTION
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.grantOption = iprot.readBool();
+              struct.setGrantOptionIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // GRANT_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.grantTime = iprot.readI32();
+              struct.setGrantTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // GRANTOR_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.grantorName = iprot.readString();
+              struct.setGrantorNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 7: // GRANTOR_PRINCIPAL_TYPE
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.grantorPrincipalType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+              struct.setGrantorPrincipalTypeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, RolePrincipalGrant struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.roleName != null) {
+        oprot.writeFieldBegin(ROLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.roleName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.principalName != null) {
+        oprot.writeFieldBegin(PRINCIPAL_NAME_FIELD_DESC);
+        oprot.writeString(struct.principalName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.principalType != null) {
+        oprot.writeFieldBegin(PRINCIPAL_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.principalType.getValue());
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(GRANT_OPTION_FIELD_DESC);
+      oprot.writeBool(struct.grantOption);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(GRANT_TIME_FIELD_DESC);
+      oprot.writeI32(struct.grantTime);
+      oprot.writeFieldEnd();
+      if (struct.grantorName != null) {
+        oprot.writeFieldBegin(GRANTOR_NAME_FIELD_DESC);
+        oprot.writeString(struct.grantorName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.grantorPrincipalType != null) {
+        oprot.writeFieldBegin(GRANTOR_PRINCIPAL_TYPE_FIELD_DESC);
+        oprot.writeI32(struct.grantorPrincipalType.getValue());
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class RolePrincipalGrantTupleSchemeFactory implements SchemeFactory {
+    public RolePrincipalGrantTupleScheme getScheme() {
+      return new RolePrincipalGrantTupleScheme();
+    }
+  }
+
+  private static class RolePrincipalGrantTupleScheme extends TupleScheme<RolePrincipalGrant> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, RolePrincipalGrant struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetRoleName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetPrincipalName()) {
+        optionals.set(1);
+      }
+      if (struct.isSetPrincipalType()) {
+        optionals.set(2);
+      }
+      if (struct.isSetGrantOption()) {
+        optionals.set(3);
+      }
+      if (struct.isSetGrantTime()) {
+        optionals.set(4);
+      }
+      if (struct.isSetGrantorName()) {
+        optionals.set(5);
+      }
+      if (struct.isSetGrantorPrincipalType()) {
+        optionals.set(6);
+      }
+      oprot.writeBitSet(optionals, 7);
+      if (struct.isSetRoleName()) {
+        oprot.writeString(struct.roleName);
+      }
+      if (struct.isSetPrincipalName()) {
+        oprot.writeString(struct.principalName);
+      }
+      if (struct.isSetPrincipalType()) {
+        oprot.writeI32(struct.principalType.getValue());
+      }
+      if (struct.isSetGrantOption()) {
+        oprot.writeBool(struct.grantOption);
+      }
+      if (struct.isSetGrantTime()) {
+        oprot.writeI32(struct.grantTime);
+      }
+      if (struct.isSetGrantorName()) {
+        oprot.writeString(struct.grantorName);
+      }
+      if (struct.isSetGrantorPrincipalType()) {
+        oprot.writeI32(struct.grantorPrincipalType.getValue());
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, RolePrincipalGrant struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(7);
+      if (incoming.get(0)) {
+        struct.roleName = iprot.readString();
+        struct.setRoleNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.principalName = iprot.readString();
+        struct.setPrincipalNameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.principalType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+        struct.setPrincipalTypeIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.grantOption = iprot.readBool();
+        struct.setGrantOptionIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.grantTime = iprot.readI32();
+        struct.setGrantTimeIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.grantorName = iprot.readString();
+        struct.setGrantorNameIsSet(true);
+      }
+      if (incoming.get(6)) {
+        struct.grantorPrincipalType = org.apache.hadoop.hive.metastore.api.PrincipalType.findByValue(iprot.readI32());
+        struct.setGrantorPrincipalTypeIsSet(true);
+      }
+    }
+  }
+
+}
+
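
As a quick orientation for the RolePrincipalGrant bean just added, a hedged sketch
(not part of the patch) using the generated seven-argument constructor; the
principal data is invented, while PrincipalType.USER is an existing metastore enum
value:

    import org.apache.hadoop.hive.metastore.api.PrincipalType;
    import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;

    public class GrantExample {
      public static void main(String[] args) {
        // All seven fields, in the order declared in the constructor above.
        RolePrincipalGrant grant = new RolePrincipalGrant(
            "analysts",             // roleName (illustrative)
            "alice",                // principalName (illustrative)
            PrincipalType.USER,
            false,                  // grantOption
            1531440000,             // grantTime, epoch seconds (illustrative)
            "admin",                // grantorName (illustrative)
            PrincipalType.USER);
        System.out.println(grant);  // toString() renders every field
        // The constructor marks the primitive fields as set via the isset bitfield.
        assert grant.isSetGrantOption() && grant.isSetGrantTime();
      }
    }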


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java
new file mode 100644
index 0000000..17f0ee5
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java
@@ -0,0 +1,553 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetFileMetadataByExprResult implements org.apache.thrift.TBase<GetFileMetadataByExprResult, GetFileMetadataByExprResult._Fields>, java.io.Serializable, Cloneable, Comparable<GetFileMetadataByExprResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFileMetadataByExprResult");
+
+  private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.MAP, (short)1);
+  private static final org.apache.thrift.protocol.TField IS_SUPPORTED_FIELD_DESC = new org.apache.thrift.protocol.TField("isSupported", org.apache.thrift.protocol.TType.BOOL, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetFileMetadataByExprResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetFileMetadataByExprResultTupleSchemeFactory());
+  }
+
+  private Map<Long,MetadataPpdResult> metadata; // required
+  private boolean isSupported; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    METADATA((short)1, "metadata"),
+    IS_SUPPORTED((short)2, "isSupported");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it is not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // METADATA
+          return METADATA;
+        case 2: // IS_SUPPORTED
+          return IS_SUPPORTED;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it is not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ISSUPPORTED_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64), 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, MetadataPpdResult.class))));
+    tmpMap.put(_Fields.IS_SUPPORTED, new org.apache.thrift.meta_data.FieldMetaData("isSupported", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFileMetadataByExprResult.class, metaDataMap);
+  }
+
+  public GetFileMetadataByExprResult() {
+  }
+
+  public GetFileMetadataByExprResult(
+    Map<Long,MetadataPpdResult> metadata,
+    boolean isSupported)
+  {
+    this();
+    this.metadata = metadata;
+    this.isSupported = isSupported;
+    setIsSupportedIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetFileMetadataByExprResult(GetFileMetadataByExprResult other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetMetadata()) {
+      Map<Long,MetadataPpdResult> __this__metadata = new HashMap<Long,MetadataPpdResult>(other.metadata.size());
+      for (Map.Entry<Long, MetadataPpdResult> other_element : other.metadata.entrySet()) {
+
+        Long other_element_key = other_element.getKey();
+        MetadataPpdResult other_element_value = other_element.getValue();
+
+        Long __this__metadata_copy_key = other_element_key;
+
+        MetadataPpdResult __this__metadata_copy_value = new MetadataPpdResult(other_element_value);
+
+        __this__metadata.put(__this__metadata_copy_key, __this__metadata_copy_value);
+      }
+      this.metadata = __this__metadata;
+    }
+    this.isSupported = other.isSupported;
+  }
+
+  public GetFileMetadataByExprResult deepCopy() {
+    return new GetFileMetadataByExprResult(this);
+  }
+
+  @Override
+  public void clear() {
+    this.metadata = null;
+    setIsSupportedIsSet(false);
+    this.isSupported = false;
+  }
+
+  public int getMetadataSize() {
+    return (this.metadata == null) ? 0 : this.metadata.size();
+  }
+
+  public void putToMetadata(long key, MetadataPpdResult val) {
+    if (this.metadata == null) {
+      this.metadata = new HashMap<Long,MetadataPpdResult>();
+    }
+    this.metadata.put(key, val);
+  }
+
+  public Map<Long,MetadataPpdResult> getMetadata() {
+    return this.metadata;
+  }
+
+  public void setMetadata(Map<Long,MetadataPpdResult> metadata) {
+    this.metadata = metadata;
+  }
+
+  public void unsetMetadata() {
+    this.metadata = null;
+  }
+
+  /** Returns true if field metadata is set (has been assigned a value) and false otherwise */
+  public boolean isSetMetadata() {
+    return this.metadata != null;
+  }
+
+  public void setMetadataIsSet(boolean value) {
+    if (!value) {
+      this.metadata = null;
+    }
+  }
+
+  public boolean isIsSupported() {
+    return this.isSupported;
+  }
+
+  public void setIsSupported(boolean isSupported) {
+    this.isSupported = isSupported;
+    setIsSupportedIsSet(true);
+  }
+
+  public void unsetIsSupported() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID);
+  }
+
+  /** Returns true if field isSupported is set (has been assigned a value) and false otherwise */
+  public boolean isSetIsSupported() {
+    return EncodingUtils.testBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID);
+  }
+
+  public void setIsSupportedIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case METADATA:
+      if (value == null) {
+        unsetMetadata();
+      } else {
+        setMetadata((Map<Long,MetadataPpdResult>)value);
+      }
+      break;
+
+    case IS_SUPPORTED:
+      if (value == null) {
+        unsetIsSupported();
+      } else {
+        setIsSupported((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case METADATA:
+      return getMetadata();
+
+    case IS_SUPPORTED:
+      return isIsSupported();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case METADATA:
+      return isSetMetadata();
+    case IS_SUPPORTED:
+      return isSetIsSupported();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetFileMetadataByExprResult)
+      return this.equals((GetFileMetadataByExprResult)that);
+    return false;
+  }
+
+  public boolean equals(GetFileMetadataByExprResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_metadata = true && this.isSetMetadata();
+    boolean that_present_metadata = true && that.isSetMetadata();
+    if (this_present_metadata || that_present_metadata) {
+      if (!(this_present_metadata && that_present_metadata))
+        return false;
+      if (!this.metadata.equals(that.metadata))
+        return false;
+    }
+
+    boolean this_present_isSupported = true;
+    boolean that_present_isSupported = true;
+    if (this_present_isSupported || that_present_isSupported) {
+      if (!(this_present_isSupported && that_present_isSupported))
+        return false;
+      if (this.isSupported != that.isSupported)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_metadata = true && (isSetMetadata());
+    list.add(present_metadata);
+    if (present_metadata)
+      list.add(metadata);
+
+    boolean present_isSupported = true;
+    list.add(present_isSupported);
+    if (present_isSupported)
+      list.add(isSupported);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetFileMetadataByExprResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMetadata()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetIsSupported()).compareTo(other.isSetIsSupported());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIsSupported()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isSupported, other.isSupported);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetFileMetadataByExprResult(");
+    boolean first = true;
+
+    sb.append("metadata:");
+    if (this.metadata == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.metadata);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("isSupported:");
+    sb.append(this.isSupported);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetMetadata()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'metadata' is unset! Struct:" + toString());
+    }
+
+    if (!isSetIsSupported()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'isSupported' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetFileMetadataByExprResultStandardSchemeFactory implements SchemeFactory {
+    public GetFileMetadataByExprResultStandardScheme getScheme() {
+      return new GetFileMetadataByExprResultStandardScheme();
+    }
+  }
+
+  private static class GetFileMetadataByExprResultStandardScheme extends StandardScheme<GetFileMetadataByExprResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // METADATA
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map772 = iprot.readMapBegin();
+                struct.metadata = new HashMap<Long,MetadataPpdResult>(2*_map772.size);
+                long _key773;
+                MetadataPpdResult _val774;
+                for (int _i775 = 0; _i775 < _map772.size; ++_i775)
+                {
+                  _key773 = iprot.readI64();
+                  _val774 = new MetadataPpdResult();
+                  _val774.read(iprot);
+                  struct.metadata.put(_key773, _val774);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setMetadataIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // IS_SUPPORTED
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.isSupported = iprot.readBool();
+              struct.setIsSupportedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.metadata != null) {
+        oprot.writeFieldBegin(METADATA_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size()));
+          for (Map.Entry<Long, MetadataPpdResult> _iter776 : struct.metadata.entrySet())
+          {
+            oprot.writeI64(_iter776.getKey());
+            _iter776.getValue().write(oprot);
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(IS_SUPPORTED_FIELD_DESC);
+      oprot.writeBool(struct.isSupported);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetFileMetadataByExprResultTupleSchemeFactory implements SchemeFactory {
+    public GetFileMetadataByExprResultTupleScheme getScheme() {
+      return new GetFileMetadataByExprResultTupleScheme();
+    }
+  }
+
+  private static class GetFileMetadataByExprResultTupleScheme extends TupleScheme<GetFileMetadataByExprResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.metadata.size());
+        for (Map.Entry<Long, MetadataPpdResult> _iter777 : struct.metadata.entrySet())
+        {
+          oprot.writeI64(_iter777.getKey());
+          _iter777.getValue().write(oprot);
+        }
+      }
+      oprot.writeBool(struct.isSupported);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TMap _map778 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.metadata = new HashMap<Long,MetadataPpdResult>(2*_map778.size);
+        long _key779;
+        MetadataPpdResult _val780;
+        for (int _i781 = 0; _i781 < _map778.size; ++_i781)
+        {
+          _key779 = iprot.readI64();
+          _val780 = new MetadataPpdResult();
+          _val780.read(iprot);
+          struct.metadata.put(_key779, _val780);
+        }
+      }
+      struct.setMetadataIsSet(true);
+      struct.isSupported = iprot.readBool();
+      struct.setIsSupportedIsSet(true);
+    }
+  }
+
+}
+
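
One more illustrative sketch (not part of the patch) of the required-field contract
that the validate() method above enforces for GetFileMetadataByExprResult; the
file-id key and empty MetadataPpdResult are placeholders:

    import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult;
    import org.apache.hadoop.hive.metastore.api.MetadataPpdResult;

    public class ValidateExample {
      public static void main(String[] args) throws Exception {
        GetFileMetadataByExprResult result = new GetFileMetadataByExprResult();
        // Both fields are REQUIRED in the metadata map above, so validate()
        // throws TProtocolException until each has been assigned.
        result.putToMetadata(42L, new MetadataPpdResult());  // placeholder entry
        result.setIsSupported(true);
        result.validate();  // passes now that both required fields are set
      }
    }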

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java
new file mode 100644
index 0000000..12b4392
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java
@@ -0,0 +1,438 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetFileMetadataRequest implements org.apache.thrift.TBase<GetFileMetadataRequest, GetFileMetadataRequest._Fields>, java.io.Serializable, Cloneable, Comparable<GetFileMetadataRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFileMetadataRequest");
+
+  private static final org.apache.thrift.protocol.TField FILE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("fileIds", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetFileMetadataRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetFileMetadataRequestTupleSchemeFactory());
+  }
+
+  private List<Long> fileIds; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    FILE_IDS((short)1, "fileIds");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // FILE_IDS
+          return FILE_IDS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.FILE_IDS, new org.apache.thrift.meta_data.FieldMetaData("fileIds", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFileMetadataRequest.class, metaDataMap);
+  }
+
+  public GetFileMetadataRequest() {
+  }
+
+  public GetFileMetadataRequest(
+    List<Long> fileIds)
+  {
+    this();
+    this.fileIds = fileIds;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetFileMetadataRequest(GetFileMetadataRequest other) {
+    if (other.isSetFileIds()) {
+      List<Long> __this__fileIds = new ArrayList<Long>(other.fileIds);
+      this.fileIds = __this__fileIds;
+    }
+  }
+
+  public GetFileMetadataRequest deepCopy() {
+    return new GetFileMetadataRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.fileIds = null;
+  }
+
+  public int getFileIdsSize() {
+    return (this.fileIds == null) ? 0 : this.fileIds.size();
+  }
+
+  public java.util.Iterator<Long> getFileIdsIterator() {
+    return (this.fileIds == null) ? null : this.fileIds.iterator();
+  }
+
+  public void addToFileIds(long elem) {
+    if (this.fileIds == null) {
+      this.fileIds = new ArrayList<Long>();
+    }
+    this.fileIds.add(elem);
+  }
+
+  public List<Long> getFileIds() {
+    return this.fileIds;
+  }
+
+  public void setFileIds(List<Long> fileIds) {
+    this.fileIds = fileIds;
+  }
+
+  public void unsetFileIds() {
+    this.fileIds = null;
+  }
+
+  /** Returns true if field fileIds is set (has been assigned a value) and false otherwise */
+  public boolean isSetFileIds() {
+    return this.fileIds != null;
+  }
+
+  public void setFileIdsIsSet(boolean value) {
+    if (!value) {
+      this.fileIds = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case FILE_IDS:
+      if (value == null) {
+        unsetFileIds();
+      } else {
+        setFileIds((List<Long>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case FILE_IDS:
+      return getFileIds();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case FILE_IDS:
+      return isSetFileIds();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetFileMetadataRequest)
+      return this.equals((GetFileMetadataRequest)that);
+    return false;
+  }
+
+  public boolean equals(GetFileMetadataRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_fileIds = true && this.isSetFileIds();
+    boolean that_present_fileIds = true && that.isSetFileIds();
+    if (this_present_fileIds || that_present_fileIds) {
+      if (!(this_present_fileIds && that_present_fileIds))
+        return false;
+      if (!this.fileIds.equals(that.fileIds))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_fileIds = true && (isSetFileIds());
+    list.add(present_fileIds);
+    if (present_fileIds)
+      list.add(fileIds);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetFileMetadataRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetFileIds()).compareTo(other.isSetFileIds());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetFileIds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fileIds, other.fileIds);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetFileMetadataRequest(");
+    boolean first = true;
+
+    sb.append("fileIds:");
+    if (this.fileIds == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.fileIds);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetFileIds()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'fileIds' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetFileMetadataRequestStandardSchemeFactory implements SchemeFactory {
+    public GetFileMetadataRequestStandardScheme getScheme() {
+      return new GetFileMetadataRequestStandardScheme();
+    }
+  }
+
+  private static class GetFileMetadataRequestStandardScheme extends StandardScheme<GetFileMetadataRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // FILE_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list800 = iprot.readListBegin();
+                struct.fileIds = new ArrayList<Long>(_list800.size);
+                long _elem801;
+                for (int _i802 = 0; _i802 < _list800.size; ++_i802)
+                {
+                  _elem801 = iprot.readI64();
+                  struct.fileIds.add(_elem801);
+                }
+                iprot.readListEnd();
+              }
+              struct.setFileIdsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.fileIds != null) {
+        oprot.writeFieldBegin(FILE_IDS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size()));
+          for (long _iter803 : struct.fileIds)
+          {
+            oprot.writeI64(_iter803);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetFileMetadataRequestTupleSchemeFactory implements SchemeFactory {
+    public GetFileMetadataRequestTupleScheme getScheme() {
+      return new GetFileMetadataRequestTupleScheme();
+    }
+  }
+
+  private static class GetFileMetadataRequestTupleScheme extends TupleScheme<GetFileMetadataRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.fileIds.size());
+        for (long _iter804 : struct.fileIds)
+        {
+          oprot.writeI64(_iter804);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list805 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.fileIds = new ArrayList<Long>(_list805.size);
+        long _elem806;
+        for (int _i807 = 0; _i807 < _list805.size; ++_i807)
+        {
+          _elem806 = iprot.readI64();
+          struct.fileIds.add(_elem806);
+        }
+      }
+      struct.setFileIdsIsSet(true);
+    }
+  }
+
+}
+
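
Because fileIds is declared REQUIRED, validate() above (which the StandardScheme write path also invokes before serializing) rejects a struct where the field was never assigned. A minimal sketch of that behavior, assuming the generated classes are on the classpath; RequiredFieldSketch is a hypothetical name:

import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest;
import org.apache.thrift.protocol.TProtocolException;

public class RequiredFieldSketch {
  public static void main(String[] args) throws Exception {
    GetFileMetadataRequest req = new GetFileMetadataRequest();
    try {
      req.validate(); // 'fileIds' is REQUIRED and still unset
    } catch (TProtocolException expected) {
      System.out.println(expected.getMessage()); // "Required field 'fileIds' is unset! ..."
    }
    req.addToFileIds(42L); // lazily allocates the backing ArrayList on first add
    req.validate();        // now passes
  }
}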

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java
new file mode 100644
index 0000000..65708d7
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java
@@ -0,0 +1,540 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetFileMetadataResult implements org.apache.thrift.TBase<GetFileMetadataResult, GetFileMetadataResult._Fields>, java.io.Serializable, Cloneable, Comparable<GetFileMetadataResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetFileMetadataResult");
+
+  private static final org.apache.thrift.protocol.TField METADATA_FIELD_DESC = new org.apache.thrift.protocol.TField("metadata", org.apache.thrift.protocol.TType.MAP, (short)1);
+  private static final org.apache.thrift.protocol.TField IS_SUPPORTED_FIELD_DESC = new org.apache.thrift.protocol.TField("isSupported", org.apache.thrift.protocol.TType.BOOL, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetFileMetadataResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetFileMetadataResultTupleSchemeFactory());
+  }
+
+  private Map<Long,ByteBuffer> metadata; // required
+  private boolean isSupported; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    METADATA((short)1, "metadata"),
+    IS_SUPPORTED((short)2, "isSupported");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // METADATA
+          return METADATA;
+        case 2: // IS_SUPPORTED
+          return IS_SUPPORTED;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __ISSUPPORTED_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.METADATA, new org.apache.thrift.meta_data.FieldMetaData("metadata", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64), 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING            , true))));
+    tmpMap.put(_Fields.IS_SUPPORTED, new org.apache.thrift.meta_data.FieldMetaData("isSupported", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetFileMetadataResult.class, metaDataMap);
+  }
+
+  public GetFileMetadataResult() {
+  }
+
+  public GetFileMetadataResult(
+    Map<Long,ByteBuffer> metadata,
+    boolean isSupported)
+  {
+    this();
+    this.metadata = metadata;
+    this.isSupported = isSupported;
+    setIsSupportedIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetFileMetadataResult(GetFileMetadataResult other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetMetadata()) {
+      Map<Long,ByteBuffer> __this__metadata = new HashMap<Long,ByteBuffer>(other.metadata);
+      this.metadata = __this__metadata;
+    }
+    this.isSupported = other.isSupported;
+  }
+
+  public GetFileMetadataResult deepCopy() {
+    return new GetFileMetadataResult(this);
+  }
+
+  @Override
+  public void clear() {
+    this.metadata = null;
+    setIsSupportedIsSet(false);
+    this.isSupported = false;
+  }
+
+  public int getMetadataSize() {
+    return (this.metadata == null) ? 0 : this.metadata.size();
+  }
+
+  public void putToMetadata(long key, ByteBuffer val) {
+    if (this.metadata == null) {
+      this.metadata = new HashMap<Long,ByteBuffer>();
+    }
+    this.metadata.put(key, val);
+  }
+
+  public Map<Long,ByteBuffer> getMetadata() {
+    return this.metadata;
+  }
+
+  public void setMetadata(Map<Long,ByteBuffer> metadata) {
+    this.metadata = metadata;
+  }
+
+  public void unsetMetadata() {
+    this.metadata = null;
+  }
+
+  /** Returns true if field metadata is set (has been assigned a value) and false otherwise */
+  public boolean isSetMetadata() {
+    return this.metadata != null;
+  }
+
+  public void setMetadataIsSet(boolean value) {
+    if (!value) {
+      this.metadata = null;
+    }
+  }
+
+  public boolean isIsSupported() {
+    return this.isSupported;
+  }
+
+  public void setIsSupported(boolean isSupported) {
+    this.isSupported = isSupported;
+    setIsSupportedIsSet(true);
+  }
+
+  public void unsetIsSupported() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID);
+  }
+
+  /** Returns true if field isSupported is set (has been assigned a value) and false otherwise */
+  public boolean isSetIsSupported() {
+    return EncodingUtils.testBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID);
+  }
+
+  public void setIsSupportedIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSUPPORTED_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case METADATA:
+      if (value == null) {
+        unsetMetadata();
+      } else {
+        setMetadata((Map<Long,ByteBuffer>)value);
+      }
+      break;
+
+    case IS_SUPPORTED:
+      if (value == null) {
+        unsetIsSupported();
+      } else {
+        setIsSupported((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case METADATA:
+      return getMetadata();
+
+    case IS_SUPPORTED:
+      return isIsSupported();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case METADATA:
+      return isSetMetadata();
+    case IS_SUPPORTED:
+      return isSetIsSupported();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetFileMetadataResult)
+      return this.equals((GetFileMetadataResult)that);
+    return false;
+  }
+
+  public boolean equals(GetFileMetadataResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_metadata = true && this.isSetMetadata();
+    boolean that_present_metadata = true && that.isSetMetadata();
+    if (this_present_metadata || that_present_metadata) {
+      if (!(this_present_metadata && that_present_metadata))
+        return false;
+      if (!this.metadata.equals(that.metadata))
+        return false;
+    }
+
+    boolean this_present_isSupported = true;
+    boolean that_present_isSupported = true;
+    if (this_present_isSupported || that_present_isSupported) {
+      if (!(this_present_isSupported && that_present_isSupported))
+        return false;
+      if (this.isSupported != that.isSupported)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_metadata = true && (isSetMetadata());
+    list.add(present_metadata);
+    if (present_metadata)
+      list.add(metadata);
+
+    boolean present_isSupported = true;
+    list.add(present_isSupported);
+    if (present_isSupported)
+      list.add(isSupported);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetFileMetadataResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMetadata()).compareTo(other.isSetMetadata());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMetadata()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.metadata, other.metadata);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetIsSupported()).compareTo(other.isSetIsSupported());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIsSupported()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isSupported, other.isSupported);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetFileMetadataResult(");
+    boolean first = true;
+
+    sb.append("metadata:");
+    if (this.metadata == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.metadata);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("isSupported:");
+    sb.append(this.isSupported);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetMetadata()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'metadata' is unset! Struct:" + toString());
+    }
+
+    if (!isSetIsSupported()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'isSupported' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but Java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetFileMetadataResultStandardSchemeFactory implements SchemeFactory {
+    public GetFileMetadataResultStandardScheme getScheme() {
+      return new GetFileMetadataResultStandardScheme();
+    }
+  }
+
+  private static class GetFileMetadataResultStandardScheme extends StandardScheme<GetFileMetadataResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // METADATA
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map790 = iprot.readMapBegin();
+                struct.metadata = new HashMap<Long,ByteBuffer>(2*_map790.size);
+                long _key791;
+                ByteBuffer _val792;
+                for (int _i793 = 0; _i793 < _map790.size; ++_i793)
+                {
+                  _key791 = iprot.readI64();
+                  _val792 = iprot.readBinary();
+                  struct.metadata.put(_key791, _val792);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setMetadataIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // IS_SUPPORTED
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.isSupported = iprot.readBool();
+              struct.setIsSupportedIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.metadata != null) {
+        oprot.writeFieldBegin(METADATA_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size()));
+          for (Map.Entry<Long, ByteBuffer> _iter794 : struct.metadata.entrySet())
+          {
+            oprot.writeI64(_iter794.getKey());
+            oprot.writeBinary(_iter794.getValue());
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(IS_SUPPORTED_FIELD_DESC);
+      oprot.writeBool(struct.isSupported);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetFileMetadataResultTupleSchemeFactory implements SchemeFactory {
+    public GetFileMetadataResultTupleScheme getScheme() {
+      return new GetFileMetadataResultTupleScheme();
+    }
+  }
+
+  private static class GetFileMetadataResultTupleScheme extends TupleScheme<GetFileMetadataResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.metadata.size());
+        for (Map.Entry<Long, ByteBuffer> _iter795 : struct.metadata.entrySet())
+        {
+          oprot.writeI64(_iter795.getKey());
+          oprot.writeBinary(_iter795.getValue());
+        }
+      }
+      oprot.writeBool(struct.isSupported);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TMap _map796 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.metadata = new HashMap<Long,ByteBuffer>(2*_map796.size);
+        long _key797;
+        ByteBuffer _val798;
+        for (int _i799 = 0; _i799 < _map796.size; ++_i799)
+        {
+          _key797 = iprot.readI64();
+          _val798 = iprot.readBinary();
+          struct.metadata.put(_key797, _val798);
+        }
+      }
+      struct.setMetadataIsSet(true);
+      struct.isSupported = iprot.readBool();
+      struct.setIsSupportedIsSet(true);
+    }
+  }
+
+}
+
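
Unlike the reference-typed metadata map, whose unset state is simply null, the primitive isSupported flag cannot be null, so the generated code tracks it in __isset_bitfield via EncodingUtils. A minimal sketch of the difference, assuming the generated classes are visible; IssetSketch is a hypothetical name:

import java.nio.ByteBuffer;
import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult;

public class IssetSketch {
  public static void main(String[] args) throws Exception {
    GetFileMetadataResult res = new GetFileMetadataResult();

    // Reference fields use null as the "unset" marker; primitive fields
    // cannot, so isSupported is tracked in the isset bitfield instead.
    System.out.println(res.isSetMetadata());    // false (metadata == null)
    System.out.println(res.isSetIsSupported()); // false (bit not set)

    res.putToMetadata(7L, ByteBuffer.wrap(new byte[] {1, 2, 3}));
    res.setIsSupported(true); // also flips the isset bit

    res.validate(); // both REQUIRED fields are now set
  }
}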

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
new file mode 100644
index 0000000..56f239e
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
@@ -0,0 +1,542 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class GetOpenTxnsInfoResponse implements org.apache.thrift.TBase<GetOpenTxnsInfoResponse, GetOpenTxnsInfoResponse._Fields>, java.io.Serializable, Cloneable, Comparable<GetOpenTxnsInfoResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOpenTxnsInfoResponse");
+
+  private static final org.apache.thrift.protocol.TField TXN_HIGH_WATER_MARK_FIELD_DESC = new org.apache.thrift.protocol.TField("txn_high_water_mark", org.apache.thrift.protocol.TType.I64, (short)1);
+  private static final org.apache.thrift.protocol.TField OPEN_TXNS_FIELD_DESC = new org.apache.thrift.protocol.TField("open_txns", org.apache.thrift.protocol.TType.LIST, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new GetOpenTxnsInfoResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new GetOpenTxnsInfoResponseTupleSchemeFactory());
+  }
+
+  private long txn_high_water_mark; // required
+  private List<TxnInfo> open_txns; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TXN_HIGH_WATER_MARK((short)1, "txn_high_water_mark"),
+    OPEN_TXNS((short)2, "open_txns");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TXN_HIGH_WATER_MARK
+          return TXN_HIGH_WATER_MARK;
+        case 2: // OPEN_TXNS
+          return OPEN_TXNS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __TXN_HIGH_WATER_MARK_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TXN_HIGH_WATER_MARK, new org.apache.thrift.meta_data.FieldMetaData("txn_high_water_mark", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    tmpMap.put(_Fields.OPEN_TXNS, new org.apache.thrift.meta_data.FieldMetaData("open_txns", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TxnInfo.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetOpenTxnsInfoResponse.class, metaDataMap);
+  }
+
+  public GetOpenTxnsInfoResponse() {
+  }
+
+  public GetOpenTxnsInfoResponse(
+    long txn_high_water_mark,
+    List<TxnInfo> open_txns)
+  {
+    this();
+    this.txn_high_water_mark = txn_high_water_mark;
+    setTxn_high_water_markIsSet(true);
+    this.open_txns = open_txns;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public GetOpenTxnsInfoResponse(GetOpenTxnsInfoResponse other) {
+    __isset_bitfield = other.__isset_bitfield;
+    this.txn_high_water_mark = other.txn_high_water_mark;
+    if (other.isSetOpen_txns()) {
+      List<TxnInfo> __this__open_txns = new ArrayList<TxnInfo>(other.open_txns.size());
+      for (TxnInfo other_element : other.open_txns) {
+        __this__open_txns.add(new TxnInfo(other_element));
+      }
+      this.open_txns = __this__open_txns;
+    }
+  }
+
+  public GetOpenTxnsInfoResponse deepCopy() {
+    return new GetOpenTxnsInfoResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    setTxn_high_water_markIsSet(false);
+    this.txn_high_water_mark = 0;
+    this.open_txns = null;
+  }
+
+  public long getTxn_high_water_mark() {
+    return this.txn_high_water_mark;
+  }
+
+  public void setTxn_high_water_mark(long txn_high_water_mark) {
+    this.txn_high_water_mark = txn_high_water_mark;
+    setTxn_high_water_markIsSet(true);
+  }
+
+  public void unsetTxn_high_water_mark() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXN_HIGH_WATER_MARK_ISSET_ID);
+  }
+
+  /** Returns true if field txn_high_water_mark is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxn_high_water_mark() {
+    return EncodingUtils.testBit(__isset_bitfield, __TXN_HIGH_WATER_MARK_ISSET_ID);
+  }
+
+  public void setTxn_high_water_markIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXN_HIGH_WATER_MARK_ISSET_ID, value);
+  }
+
+  public int getOpen_txnsSize() {
+    return (this.open_txns == null) ? 0 : this.open_txns.size();
+  }
+
+  public java.util.Iterator<TxnInfo> getOpen_txnsIterator() {
+    return (this.open_txns == null) ? null : this.open_txns.iterator();
+  }
+
+  public void addToOpen_txns(TxnInfo elem) {
+    if (this.open_txns == null) {
+      this.open_txns = new ArrayList<TxnInfo>();
+    }
+    this.open_txns.add(elem);
+  }
+
+  public List<TxnInfo> getOpen_txns() {
+    return this.open_txns;
+  }
+
+  public void setOpen_txns(List<TxnInfo> open_txns) {
+    this.open_txns = open_txns;
+  }
+
+  public void unsetOpen_txns() {
+    this.open_txns = null;
+  }
+
+  /** Returns true if field open_txns is set (has been assigned a value) and false otherwise */
+  public boolean isSetOpen_txns() {
+    return this.open_txns != null;
+  }
+
+  public void setOpen_txnsIsSet(boolean value) {
+    if (!value) {
+      this.open_txns = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TXN_HIGH_WATER_MARK:
+      if (value == null) {
+        unsetTxn_high_water_mark();
+      } else {
+        setTxn_high_water_mark((Long)value);
+      }
+      break;
+
+    case OPEN_TXNS:
+      if (value == null) {
+        unsetOpen_txns();
+      } else {
+        setOpen_txns((List<TxnInfo>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TXN_HIGH_WATER_MARK:
+      return getTxn_high_water_mark();
+
+    case OPEN_TXNS:
+      return getOpen_txns();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TXN_HIGH_WATER_MARK:
+      return isSetTxn_high_water_mark();
+    case OPEN_TXNS:
+      return isSetOpen_txns();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof GetOpenTxnsInfoResponse)
+      return this.equals((GetOpenTxnsInfoResponse)that);
+    return false;
+  }
+
+  public boolean equals(GetOpenTxnsInfoResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_txn_high_water_mark = true;
+    boolean that_present_txn_high_water_mark = true;
+    if (this_present_txn_high_water_mark || that_present_txn_high_water_mark) {
+      if (!(this_present_txn_high_water_mark && that_present_txn_high_water_mark))
+        return false;
+      if (this.txn_high_water_mark != that.txn_high_water_mark)
+        return false;
+    }
+
+    boolean this_present_open_txns = true && this.isSetOpen_txns();
+    boolean that_present_open_txns = true && that.isSetOpen_txns();
+    if (this_present_open_txns || that_present_open_txns) {
+      if (!(this_present_open_txns && that_present_open_txns))
+        return false;
+      if (!this.open_txns.equals(that.open_txns))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_txn_high_water_mark = true;
+    list.add(present_txn_high_water_mark);
+    if (present_txn_high_water_mark)
+      list.add(txn_high_water_mark);
+
+    boolean present_open_txns = true && (isSetOpen_txns());
+    list.add(present_open_txns);
+    if (present_open_txns)
+      list.add(open_txns);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(GetOpenTxnsInfoResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTxn_high_water_mark()).compareTo(other.isSetTxn_high_water_mark());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxn_high_water_mark()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txn_high_water_mark, other.txn_high_water_mark);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetOpen_txns()).compareTo(other.isSetOpen_txns());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetOpen_txns()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.open_txns, other.open_txns);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("GetOpenTxnsInfoResponse(");
+    boolean first = true;
+
+    sb.append("txn_high_water_mark:");
+    sb.append(this.txn_high_water_mark);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("open_txns:");
+    if (this.open_txns == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.open_txns);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTxn_high_water_mark()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'txn_high_water_mark' is unset! Struct:" + toString());
+    }
+
+    if (!isSetOpen_txns()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'open_txns' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but Java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class GetOpenTxnsInfoResponseStandardSchemeFactory implements SchemeFactory {
+    public GetOpenTxnsInfoResponseStandardScheme getScheme() {
+      return new GetOpenTxnsInfoResponseStandardScheme();
+    }
+  }
+
+  private static class GetOpenTxnsInfoResponseStandardScheme extends StandardScheme<GetOpenTxnsInfoResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsInfoResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TXN_HIGH_WATER_MARK
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.txn_high_water_mark = iprot.readI64();
+              struct.setTxn_high_water_markIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // OPEN_TXNS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list554 = iprot.readListBegin();
+                struct.open_txns = new ArrayList<TxnInfo>(_list554.size);
+                TxnInfo _elem555;
+                for (int _i556 = 0; _i556 < _list554.size; ++_i556)
+                {
+                  _elem555 = new TxnInfo();
+                  _elem555.read(iprot);
+                  struct.open_txns.add(_elem555);
+                }
+                iprot.readListEnd();
+              }
+              struct.setOpen_txnsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsInfoResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      oprot.writeFieldBegin(TXN_HIGH_WATER_MARK_FIELD_DESC);
+      oprot.writeI64(struct.txn_high_water_mark);
+      oprot.writeFieldEnd();
+      if (struct.open_txns != null) {
+        oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.open_txns.size()));
+          for (TxnInfo _iter557 : struct.open_txns)
+          {
+            _iter557.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class GetOpenTxnsInfoResponseTupleSchemeFactory implements SchemeFactory {
+    public GetOpenTxnsInfoResponseTupleScheme getScheme() {
+      return new GetOpenTxnsInfoResponseTupleScheme();
+    }
+  }
+
+  private static class GetOpenTxnsInfoResponseTupleScheme extends TupleScheme<GetOpenTxnsInfoResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeI64(struct.txn_high_water_mark);
+      {
+        oprot.writeI32(struct.open_txns.size());
+        for (TxnInfo _iter558 : struct.open_txns)
+        {
+          _iter558.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.txn_high_water_mark = iprot.readI64();
+      struct.setTxn_high_water_markIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list559 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.open_txns = new ArrayList<TxnInfo>(_list559.size);
+        TxnInfo _elem560;
+        for (int _i561 = 0; _i561 < _list559.size; ++_i561)
+        {
+          _elem560 = new TxnInfo();
+          _elem560.read(iprot);
+          struct.open_txns.add(_elem560);
+        }
+      }
+      struct.setOpen_txnsIsSet(true);
+    }
+  }
+
+}
+
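
The copy constructor above deep-copies the open_txns list, cloning each TxnInfo element rather than aliasing it, so mutating a copy cannot leak back into the original. A minimal sketch, assuming TxnInfo's generated no-arg and copy constructors and equals() follow the same Thrift pattern shown here; DeepCopySketch is a hypothetical name:

import java.util.ArrayList;
import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse;
import org.apache.hadoop.hive.metastore.api.TxnInfo;

public class DeepCopySketch {
  public static void main(String[] args) {
    GetOpenTxnsInfoResponse resp =
        new GetOpenTxnsInfoResponse(100L, new ArrayList<TxnInfo>());
    resp.addToOpen_txns(new TxnInfo());

    // deepCopy() clones the list and every TxnInfo inside it.
    GetOpenTxnsInfoResponse copy = resp.deepCopy();
    System.out.println(copy.equals(resp));                                        // true
    System.out.println(copy.getOpen_txns().get(0) == resp.getOpen_txns().get(0)); // false
  }
}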


[86/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
index 0000000,247fdaa..47ff56c
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpec.java
@@@ -1,0 -1,932 +1,1136 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionSpec implements org.apache.thrift.TBase<PartitionSpec, PartitionSpec._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionSpec> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionSpec");
+ 
+   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+   private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
+   private static final org.apache.thrift.protocol.TField ROOT_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("rootPath", org.apache.thrift.protocol.TType.STRING, (short)3);
+   private static final org.apache.thrift.protocol.TField SHARED_SDPARTITION_SPEC_FIELD_DESC = new org.apache.thrift.protocol.TField("sharedSDPartitionSpec", org.apache.thrift.protocol.TType.STRUCT, (short)4);
+   private static final org.apache.thrift.protocol.TField PARTITION_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("partitionList", org.apache.thrift.protocol.TType.STRUCT, (short)5);
+   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
++  private static final org.apache.thrift.protocol.TField WRITE_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("writeId", org.apache.thrift.protocol.TType.I64, (short)7);
++  private static final org.apache.thrift.protocol.TField IS_STATS_COMPLIANT_FIELD_DESC = new org.apache.thrift.protocol.TField("isStatsCompliant", org.apache.thrift.protocol.TType.BOOL, (short)8);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new PartitionSpecStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new PartitionSpecTupleSchemeFactory());
+   }
+ 
+   private String dbName; // required
+   private String tableName; // required
+   private String rootPath; // required
+   private PartitionSpecWithSharedSD sharedSDPartitionSpec; // optional
+   private PartitionListComposingSpec partitionList; // optional
+   private String catName; // optional
++  private long writeId; // optional
++  private boolean isStatsCompliant; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     DB_NAME((short)1, "dbName"),
+     TABLE_NAME((short)2, "tableName"),
+     ROOT_PATH((short)3, "rootPath"),
+     SHARED_SDPARTITION_SPEC((short)4, "sharedSDPartitionSpec"),
+     PARTITION_LIST((short)5, "partitionList"),
 -    CAT_NAME((short)6, "catName");
++    CAT_NAME((short)6, "catName"),
++    WRITE_ID((short)7, "writeId"),
++    IS_STATS_COMPLIANT((short)8, "isStatsCompliant");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // DB_NAME
+           return DB_NAME;
+         case 2: // TABLE_NAME
+           return TABLE_NAME;
+         case 3: // ROOT_PATH
+           return ROOT_PATH;
+         case 4: // SHARED_SDPARTITION_SPEC
+           return SHARED_SDPARTITION_SPEC;
+         case 5: // PARTITION_LIST
+           return PARTITION_LIST;
+         case 6: // CAT_NAME
+           return CAT_NAME;
++        case 7: // WRITE_ID
++          return WRITE_ID;
++        case 8: // IS_STATS_COMPLIANT
++          return IS_STATS_COMPLIANT;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
 -  private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME};
++  private static final int __WRITEID_ISSET_ID = 0;
++  private static final int __ISSTATSCOMPLIANT_ISSET_ID = 1;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.SHARED_SDPARTITION_SPEC,_Fields.PARTITION_LIST,_Fields.CAT_NAME,_Fields.WRITE_ID,_Fields.IS_STATS_COMPLIANT};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.ROOT_PATH, new org.apache.thrift.meta_data.FieldMetaData("rootPath", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.SHARED_SDPARTITION_SPEC, new org.apache.thrift.meta_data.FieldMetaData("sharedSDPartitionSpec", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionSpecWithSharedSD.class)));
+     tmpMap.put(_Fields.PARTITION_LIST, new org.apache.thrift.meta_data.FieldMetaData("partitionList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionListComposingSpec.class)));
+     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.WRITE_ID, new org.apache.thrift.meta_data.FieldMetaData("writeId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.IS_STATS_COMPLIANT, new org.apache.thrift.meta_data.FieldMetaData("isStatsCompliant", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionSpec.class, metaDataMap);
+   }
+ 
+   public PartitionSpec() {
++    this.writeId = -1L;
++
+   }
+ 
+   public PartitionSpec(
+     String dbName,
+     String tableName,
+     String rootPath)
+   {
+     this();
+     this.dbName = dbName;
+     this.tableName = tableName;
+     this.rootPath = rootPath;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public PartitionSpec(PartitionSpec other) {
++    __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetDbName()) {
+       this.dbName = other.dbName;
+     }
+     if (other.isSetTableName()) {
+       this.tableName = other.tableName;
+     }
+     if (other.isSetRootPath()) {
+       this.rootPath = other.rootPath;
+     }
+     if (other.isSetSharedSDPartitionSpec()) {
+       this.sharedSDPartitionSpec = new PartitionSpecWithSharedSD(other.sharedSDPartitionSpec);
+     }
+     if (other.isSetPartitionList()) {
+       this.partitionList = new PartitionListComposingSpec(other.partitionList);
+     }
+     if (other.isSetCatName()) {
+       this.catName = other.catName;
+     }
++    this.writeId = other.writeId;
++    this.isStatsCompliant = other.isStatsCompliant;
+   }
+ 
+   public PartitionSpec deepCopy() {
+     return new PartitionSpec(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.dbName = null;
+     this.tableName = null;
+     this.rootPath = null;
+     this.sharedSDPartitionSpec = null;
+     this.partitionList = null;
+     this.catName = null;
++    this.writeId = -1L;
++
++    setIsStatsCompliantIsSet(false);
++    this.isStatsCompliant = false;
+   }
+ 
+   public String getDbName() {
+     return this.dbName;
+   }
+ 
+   public void setDbName(String dbName) {
+     this.dbName = dbName;
+   }
+ 
+   public void unsetDbName() {
+     this.dbName = null;
+   }
+ 
+   /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+   public boolean isSetDbName() {
+     return this.dbName != null;
+   }
+ 
+   public void setDbNameIsSet(boolean value) {
+     if (!value) {
+       this.dbName = null;
+     }
+   }
+ 
+   public String getTableName() {
+     return this.tableName;
+   }
+ 
+   public void setTableName(String tableName) {
+     this.tableName = tableName;
+   }
+ 
+   public void unsetTableName() {
+     this.tableName = null;
+   }
+ 
+   /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+   public boolean isSetTableName() {
+     return this.tableName != null;
+   }
+ 
+   public void setTableNameIsSet(boolean value) {
+     if (!value) {
+       this.tableName = null;
+     }
+   }
+ 
+   public String getRootPath() {
+     return this.rootPath;
+   }
+ 
+   public void setRootPath(String rootPath) {
+     this.rootPath = rootPath;
+   }
+ 
+   public void unsetRootPath() {
+     this.rootPath = null;
+   }
+ 
+   /** Returns true if field rootPath is set (has been assigned a value) and false otherwise */
+   public boolean isSetRootPath() {
+     return this.rootPath != null;
+   }
+ 
+   public void setRootPathIsSet(boolean value) {
+     if (!value) {
+       this.rootPath = null;
+     }
+   }
+ 
+   public PartitionSpecWithSharedSD getSharedSDPartitionSpec() {
+     return this.sharedSDPartitionSpec;
+   }
+ 
+   public void setSharedSDPartitionSpec(PartitionSpecWithSharedSD sharedSDPartitionSpec) {
+     this.sharedSDPartitionSpec = sharedSDPartitionSpec;
+   }
+ 
+   public void unsetSharedSDPartitionSpec() {
+     this.sharedSDPartitionSpec = null;
+   }
+ 
+   /** Returns true if field sharedSDPartitionSpec is set (has been assigned a value) and false otherwise */
+   public boolean isSetSharedSDPartitionSpec() {
+     return this.sharedSDPartitionSpec != null;
+   }
+ 
+   public void setSharedSDPartitionSpecIsSet(boolean value) {
+     if (!value) {
+       this.sharedSDPartitionSpec = null;
+     }
+   }
+ 
+   public PartitionListComposingSpec getPartitionList() {
+     return this.partitionList;
+   }
+ 
+   public void setPartitionList(PartitionListComposingSpec partitionList) {
+     this.partitionList = partitionList;
+   }
+ 
+   public void unsetPartitionList() {
+     this.partitionList = null;
+   }
+ 
+   /** Returns true if field partitionList is set (has been assigned a value) and false otherwise */
+   public boolean isSetPartitionList() {
+     return this.partitionList != null;
+   }
+ 
+   public void setPartitionListIsSet(boolean value) {
+     if (!value) {
+       this.partitionList = null;
+     }
+   }
+ 
+   public String getCatName() {
+     return this.catName;
+   }
+ 
+   public void setCatName(String catName) {
+     this.catName = catName;
+   }
+ 
+   public void unsetCatName() {
+     this.catName = null;
+   }
+ 
+   /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+   public boolean isSetCatName() {
+     return this.catName != null;
+   }
+ 
+   public void setCatNameIsSet(boolean value) {
+     if (!value) {
+       this.catName = null;
+     }
+   }
+ 
++  public long getWriteId() {
++    return this.writeId;
++  }
++
++  public void setWriteId(long writeId) {
++    this.writeId = writeId;
++    setWriteIdIsSet(true);
++  }
++
++  public void unsetWriteId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  /** Returns true if field writeId is set (has been assigned a value) and false otherwise */
++  public boolean isSetWriteId() {
++    return EncodingUtils.testBit(__isset_bitfield, __WRITEID_ISSET_ID);
++  }
++
++  public void setWriteIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __WRITEID_ISSET_ID, value);
++  }
++
++  public boolean isIsStatsCompliant() {
++    return this.isStatsCompliant;
++  }
++
++  public void setIsStatsCompliant(boolean isStatsCompliant) {
++    this.isStatsCompliant = isStatsCompliant;
++    setIsStatsCompliantIsSet(true);
++  }
++
++  public void unsetIsStatsCompliant() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  /** Returns true if field isStatsCompliant is set (has been assigned a value) and false otherwise */
++  public boolean isSetIsStatsCompliant() {
++    return EncodingUtils.testBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID);
++  }
++
++  public void setIsStatsCompliantIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ISSTATSCOMPLIANT_ISSET_ID, value);
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case DB_NAME:
+       if (value == null) {
+         unsetDbName();
+       } else {
+         setDbName((String)value);
+       }
+       break;
+ 
+     case TABLE_NAME:
+       if (value == null) {
+         unsetTableName();
+       } else {
+         setTableName((String)value);
+       }
+       break;
+ 
+     case ROOT_PATH:
+       if (value == null) {
+         unsetRootPath();
+       } else {
+         setRootPath((String)value);
+       }
+       break;
+ 
+     case SHARED_SDPARTITION_SPEC:
+       if (value == null) {
+         unsetSharedSDPartitionSpec();
+       } else {
+         setSharedSDPartitionSpec((PartitionSpecWithSharedSD)value);
+       }
+       break;
+ 
+     case PARTITION_LIST:
+       if (value == null) {
+         unsetPartitionList();
+       } else {
+         setPartitionList((PartitionListComposingSpec)value);
+       }
+       break;
+ 
+     case CAT_NAME:
+       if (value == null) {
+         unsetCatName();
+       } else {
+         setCatName((String)value);
+       }
+       break;
+ 
++    case WRITE_ID:
++      if (value == null) {
++        unsetWriteId();
++      } else {
++        setWriteId((Long)value);
++      }
++      break;
++
++    case IS_STATS_COMPLIANT:
++      if (value == null) {
++        unsetIsStatsCompliant();
++      } else {
++        setIsStatsCompliant((Boolean)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case DB_NAME:
+       return getDbName();
+ 
+     case TABLE_NAME:
+       return getTableName();
+ 
+     case ROOT_PATH:
+       return getRootPath();
+ 
+     case SHARED_SDPARTITION_SPEC:
+       return getSharedSDPartitionSpec();
+ 
+     case PARTITION_LIST:
+       return getPartitionList();
+ 
+     case CAT_NAME:
+       return getCatName();
+ 
++    case WRITE_ID:
++      return getWriteId();
++
++    case IS_STATS_COMPLIANT:
++      return isIsStatsCompliant();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case DB_NAME:
+       return isSetDbName();
+     case TABLE_NAME:
+       return isSetTableName();
+     case ROOT_PATH:
+       return isSetRootPath();
+     case SHARED_SDPARTITION_SPEC:
+       return isSetSharedSDPartitionSpec();
+     case PARTITION_LIST:
+       return isSetPartitionList();
+     case CAT_NAME:
+       return isSetCatName();
++    case WRITE_ID:
++      return isSetWriteId();
++    case IS_STATS_COMPLIANT:
++      return isSetIsStatsCompliant();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof PartitionSpec)
+       return this.equals((PartitionSpec)that);
+     return false;
+   }
+ 
+   public boolean equals(PartitionSpec that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_dbName = true && this.isSetDbName();
+     boolean that_present_dbName = true && that.isSetDbName();
+     if (this_present_dbName || that_present_dbName) {
+       if (!(this_present_dbName && that_present_dbName))
+         return false;
+       if (!this.dbName.equals(that.dbName))
+         return false;
+     }
+ 
+     boolean this_present_tableName = true && this.isSetTableName();
+     boolean that_present_tableName = true && that.isSetTableName();
+     if (this_present_tableName || that_present_tableName) {
+       if (!(this_present_tableName && that_present_tableName))
+         return false;
+       if (!this.tableName.equals(that.tableName))
+         return false;
+     }
+ 
+     boolean this_present_rootPath = true && this.isSetRootPath();
+     boolean that_present_rootPath = true && that.isSetRootPath();
+     if (this_present_rootPath || that_present_rootPath) {
+       if (!(this_present_rootPath && that_present_rootPath))
+         return false;
+       if (!this.rootPath.equals(that.rootPath))
+         return false;
+     }
+ 
+     boolean this_present_sharedSDPartitionSpec = true && this.isSetSharedSDPartitionSpec();
+     boolean that_present_sharedSDPartitionSpec = true && that.isSetSharedSDPartitionSpec();
+     if (this_present_sharedSDPartitionSpec || that_present_sharedSDPartitionSpec) {
+       if (!(this_present_sharedSDPartitionSpec && that_present_sharedSDPartitionSpec))
+         return false;
+       if (!this.sharedSDPartitionSpec.equals(that.sharedSDPartitionSpec))
+         return false;
+     }
+ 
+     boolean this_present_partitionList = true && this.isSetPartitionList();
+     boolean that_present_partitionList = true && that.isSetPartitionList();
+     if (this_present_partitionList || that_present_partitionList) {
+       if (!(this_present_partitionList && that_present_partitionList))
+         return false;
+       if (!this.partitionList.equals(that.partitionList))
+         return false;
+     }
+ 
+     boolean this_present_catName = true && this.isSetCatName();
+     boolean that_present_catName = true && that.isSetCatName();
+     if (this_present_catName || that_present_catName) {
+       if (!(this_present_catName && that_present_catName))
+         return false;
+       if (!this.catName.equals(that.catName))
+         return false;
+     }
+ 
++    boolean this_present_writeId = true && this.isSetWriteId();
++    boolean that_present_writeId = true && that.isSetWriteId();
++    if (this_present_writeId || that_present_writeId) {
++      if (!(this_present_writeId && that_present_writeId))
++        return false;
++      if (this.writeId != that.writeId)
++        return false;
++    }
++
++    boolean this_present_isStatsCompliant = true && this.isSetIsStatsCompliant();
++    boolean that_present_isStatsCompliant = true && that.isSetIsStatsCompliant();
++    if (this_present_isStatsCompliant || that_present_isStatsCompliant) {
++      if (!(this_present_isStatsCompliant && that_present_isStatsCompliant))
++        return false;
++      if (this.isStatsCompliant != that.isStatsCompliant)
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_dbName = true && (isSetDbName());
+     list.add(present_dbName);
+     if (present_dbName)
+       list.add(dbName);
+ 
+     boolean present_tableName = true && (isSetTableName());
+     list.add(present_tableName);
+     if (present_tableName)
+       list.add(tableName);
+ 
+     boolean present_rootPath = true && (isSetRootPath());
+     list.add(present_rootPath);
+     if (present_rootPath)
+       list.add(rootPath);
+ 
+     boolean present_sharedSDPartitionSpec = true && (isSetSharedSDPartitionSpec());
+     list.add(present_sharedSDPartitionSpec);
+     if (present_sharedSDPartitionSpec)
+       list.add(sharedSDPartitionSpec);
+ 
+     boolean present_partitionList = true && (isSetPartitionList());
+     list.add(present_partitionList);
+     if (present_partitionList)
+       list.add(partitionList);
+ 
+     boolean present_catName = true && (isSetCatName());
+     list.add(present_catName);
+     if (present_catName)
+       list.add(catName);
+ 
++    boolean present_writeId = true && (isSetWriteId());
++    list.add(present_writeId);
++    if (present_writeId)
++      list.add(writeId);
++
++    boolean present_isStatsCompliant = true && (isSetIsStatsCompliant());
++    list.add(present_isStatsCompliant);
++    if (present_isStatsCompliant)
++      list.add(isStatsCompliant);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(PartitionSpec other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetDbName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTableName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetRootPath()).compareTo(other.isSetRootPath());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetRootPath()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.rootPath, other.rootPath);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetSharedSDPartitionSpec()).compareTo(other.isSetSharedSDPartitionSpec());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetSharedSDPartitionSpec()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sharedSDPartitionSpec, other.sharedSDPartitionSpec);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetPartitionList()).compareTo(other.isSetPartitionList());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetPartitionList()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitionList, other.partitionList);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCatName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetWriteId()).compareTo(other.isSetWriteId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetWriteId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.writeId, other.writeId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetIsStatsCompliant()).compareTo(other.isSetIsStatsCompliant());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetIsStatsCompliant()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.isStatsCompliant, other.isStatsCompliant);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("PartitionSpec(");
+     boolean first = true;
+ 
+     sb.append("dbName:");
+     if (this.dbName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.dbName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("tableName:");
+     if (this.tableName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.tableName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("rootPath:");
+     if (this.rootPath == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.rootPath);
+     }
+     first = false;
+     if (isSetSharedSDPartitionSpec()) {
+       if (!first) sb.append(", ");
+       sb.append("sharedSDPartitionSpec:");
+       if (this.sharedSDPartitionSpec == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.sharedSDPartitionSpec);
+       }
+       first = false;
+     }
+     if (isSetPartitionList()) {
+       if (!first) sb.append(", ");
+       sb.append("partitionList:");
+       if (this.partitionList == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.partitionList);
+       }
+       first = false;
+     }
+     if (isSetCatName()) {
+       if (!first) sb.append(", ");
+       sb.append("catName:");
+       if (this.catName == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.catName);
+       }
+       first = false;
+     }
++    if (isSetWriteId()) {
++      if (!first) sb.append(", ");
++      sb.append("writeId:");
++      sb.append(this.writeId);
++      first = false;
++    }
++    if (isSetIsStatsCompliant()) {
++      if (!first) sb.append(", ");
++      sb.append("isStatsCompliant:");
++      sb.append(this.isStatsCompliant);
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     // check for sub-struct validity
+     if (sharedSDPartitionSpec != null) {
+       sharedSDPartitionSpec.validate();
+     }
+     if (partitionList != null) {
+       partitionList.validate();
+     }
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
++      // Java serialization doesn't call the default constructor, so the isset bitfield must be reset here before reading.
++      __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class PartitionSpecStandardSchemeFactory implements SchemeFactory {
+     public PartitionSpecStandardScheme getScheme() {
+       return new PartitionSpecStandardScheme();
+     }
+   }
+ 
+   private static class PartitionSpecStandardScheme extends StandardScheme<PartitionSpec> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpec struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // DB_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.dbName = iprot.readString();
+               struct.setDbNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // TABLE_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.tableName = iprot.readString();
+               struct.setTableNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 3: // ROOT_PATH
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.rootPath = iprot.readString();
+               struct.setRootPathIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 4: // SHARED_SDPARTITION_SPEC
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.sharedSDPartitionSpec = new PartitionSpecWithSharedSD();
+               struct.sharedSDPartitionSpec.read(iprot);
+               struct.setSharedSDPartitionSpecIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 5: // PARTITION_LIST
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+               struct.partitionList = new PartitionListComposingSpec();
+               struct.partitionList.read(iprot);
+               struct.setPartitionListIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 6: // CAT_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.catName = iprot.readString();
+               struct.setCatNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 7: // WRITE_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.writeId = iprot.readI64();
++              struct.setWriteIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 8: // IS_STATS_COMPLIANT
++            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
++              struct.isStatsCompliant = iprot.readBool();
++              struct.setIsStatsCompliantIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpec struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.dbName != null) {
+         oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+         oprot.writeString(struct.dbName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.tableName != null) {
+         oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+         oprot.writeString(struct.tableName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.rootPath != null) {
+         oprot.writeFieldBegin(ROOT_PATH_FIELD_DESC);
+         oprot.writeString(struct.rootPath);
+         oprot.writeFieldEnd();
+       }
+       if (struct.sharedSDPartitionSpec != null) {
+         if (struct.isSetSharedSDPartitionSpec()) {
+           oprot.writeFieldBegin(SHARED_SDPARTITION_SPEC_FIELD_DESC);
+           struct.sharedSDPartitionSpec.write(oprot);
+           oprot.writeFieldEnd();
+         }
+       }
+       if (struct.partitionList != null) {
+         if (struct.isSetPartitionList()) {
+           oprot.writeFieldBegin(PARTITION_LIST_FIELD_DESC);
+           struct.partitionList.write(oprot);
+           oprot.writeFieldEnd();
+         }
+       }
+       if (struct.catName != null) {
+         if (struct.isSetCatName()) {
+           oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+           oprot.writeString(struct.catName);
+           oprot.writeFieldEnd();
+         }
+       }
++      if (struct.isSetWriteId()) {
++        oprot.writeFieldBegin(WRITE_ID_FIELD_DESC);
++        oprot.writeI64(struct.writeId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeFieldBegin(IS_STATS_COMPLIANT_FIELD_DESC);
++        oprot.writeBool(struct.isStatsCompliant);
++        oprot.writeFieldEnd();
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class PartitionSpecTupleSchemeFactory implements SchemeFactory {
+     public PartitionSpecTupleScheme getScheme() {
+       return new PartitionSpecTupleScheme();
+     }
+   }
+ 
+   private static class PartitionSpecTupleScheme extends TupleScheme<PartitionSpec> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       BitSet optionals = new BitSet();
+       if (struct.isSetDbName()) {
+         optionals.set(0);
+       }
+       if (struct.isSetTableName()) {
+         optionals.set(1);
+       }
+       if (struct.isSetRootPath()) {
+         optionals.set(2);
+       }
+       if (struct.isSetSharedSDPartitionSpec()) {
+         optionals.set(3);
+       }
+       if (struct.isSetPartitionList()) {
+         optionals.set(4);
+       }
+       if (struct.isSetCatName()) {
+         optionals.set(5);
+       }
 -      oprot.writeBitSet(optionals, 6);
++      if (struct.isSetWriteId()) {
++        optionals.set(6);
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        optionals.set(7);
++      }
++      oprot.writeBitSet(optionals, 8);
+       if (struct.isSetDbName()) {
+         oprot.writeString(struct.dbName);
+       }
+       if (struct.isSetTableName()) {
+         oprot.writeString(struct.tableName);
+       }
+       if (struct.isSetRootPath()) {
+         oprot.writeString(struct.rootPath);
+       }
+       if (struct.isSetSharedSDPartitionSpec()) {
+         struct.sharedSDPartitionSpec.write(oprot);
+       }
+       if (struct.isSetPartitionList()) {
+         struct.partitionList.write(oprot);
+       }
+       if (struct.isSetCatName()) {
+         oprot.writeString(struct.catName);
+       }
++      if (struct.isSetWriteId()) {
++        oprot.writeI64(struct.writeId);
++      }
++      if (struct.isSetIsStatsCompliant()) {
++        oprot.writeBool(struct.isStatsCompliant);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpec struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
 -      BitSet incoming = iprot.readBitSet(6);
++      BitSet incoming = iprot.readBitSet(8);
+       if (incoming.get(0)) {
+         struct.dbName = iprot.readString();
+         struct.setDbNameIsSet(true);
+       }
+       if (incoming.get(1)) {
+         struct.tableName = iprot.readString();
+         struct.setTableNameIsSet(true);
+       }
+       if (incoming.get(2)) {
+         struct.rootPath = iprot.readString();
+         struct.setRootPathIsSet(true);
+       }
+       if (incoming.get(3)) {
+         struct.sharedSDPartitionSpec = new PartitionSpecWithSharedSD();
+         struct.sharedSDPartitionSpec.read(iprot);
+         struct.setSharedSDPartitionSpecIsSet(true);
+       }
+       if (incoming.get(4)) {
+         struct.partitionList = new PartitionListComposingSpec();
+         struct.partitionList.read(iprot);
+         struct.setPartitionListIsSet(true);
+       }
+       if (incoming.get(5)) {
+         struct.catName = iprot.readString();
+         struct.setCatNameIsSet(true);
+       }
++      if (incoming.get(6)) {
++        struct.writeId = iprot.readI64();
++        struct.setWriteIdIsSet(true);
++      }
++      if (incoming.get(7)) {
++        struct.isStatsCompliant = iprot.readBool();
++        struct.setIsStatsCompliantIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
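The hunk above adds two optional fields to PartitionSpec: writeId (i64, field 7, defaulting to -1) and isStatsCompliant (bool, field 8), both tracked through the new __isset_bitfield. In the standard scheme each new field is written only when its isset bit is on, and unknown field ids are skipped on read, while the TupleScheme bitset grows from 6 to 8 entries. A minimal round-trip sketch of the new accessors follows, assuming libthrift 0.9.3 on the classpath; the class name PartitionSpecWriteIdSketch and the literal values are illustrative only, not part of this commit. Run with -ea to enable the assertions.

import org.apache.hadoop.hive.metastore.api.PartitionSpec;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TException;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class PartitionSpecWriteIdSketch {
  public static void main(String[] args) throws TException {
    // The three DEFAULT-requirement fields from the generated convenience constructor.
    PartitionSpec spec = new PartitionSpec("demo_db", "demo_tbl", "/warehouse/demo_db/demo_tbl");

    // writeId is initialized to -1L by the constructor, but its isset bit
    // stays off until the setter runs, so the field is not yet serialized.
    assert !spec.isSetWriteId();

    spec.setWriteId(42L);            // flips __WRITEID_ISSET_ID
    spec.setIsStatsCompliant(true);  // flips __ISSTATSCOMPLIANT_ISSET_ID

    // Round-trip through TCompactProtocol, the same protocol the generated
    // writeObject/readObject methods use for Java serialization.
    byte[] bytes = new TSerializer(new TCompactProtocol.Factory()).serialize(spec);
    PartitionSpec copy = new PartitionSpec();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

    assert copy.isSetWriteId() && copy.getWriteId() == 42L;
    assert copy.isSetIsStatsCompliant() && copy.isIsStatsCompliant();
  }
}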

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
index 0000000,91cf567..a298b89
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
@@@ -1,0 -1,900 +1,1111 @@@
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ package org.apache.hadoop.hive.metastore.api;
+ 
+ import org.apache.thrift.scheme.IScheme;
+ import org.apache.thrift.scheme.SchemeFactory;
+ import org.apache.thrift.scheme.StandardScheme;
+ 
+ import org.apache.thrift.scheme.TupleScheme;
+ import org.apache.thrift.protocol.TTupleProtocol;
+ import org.apache.thrift.protocol.TProtocolException;
+ import org.apache.thrift.EncodingUtils;
+ import org.apache.thrift.TException;
+ import org.apache.thrift.async.AsyncMethodCallback;
+ import org.apache.thrift.server.AbstractNonblockingServer.*;
+ import java.util.List;
+ import java.util.ArrayList;
+ import java.util.Map;
+ import java.util.HashMap;
+ import java.util.EnumMap;
+ import java.util.Set;
+ import java.util.HashSet;
+ import java.util.EnumSet;
+ import java.util.Collections;
+ import java.util.BitSet;
+ import java.nio.ByteBuffer;
+ import java.util.Arrays;
+ import javax.annotation.Generated;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ @SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+ @Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+ @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionsStatsRequest implements org.apache.thrift.TBase<PartitionsStatsRequest, PartitionsStatsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsStatsRequest> {
+   private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsStatsRequest");
+ 
+   private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+   private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+   private static final org.apache.thrift.protocol.TField COL_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("colNames", org.apache.thrift.protocol.TType.LIST, (short)3);
+   private static final org.apache.thrift.protocol.TField PART_NAMES_FIELD_DESC = new org.apache.thrift.protocol.TField("partNames", org.apache.thrift.protocol.TType.LIST, (short)4);
+   private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)5);
++  private static final org.apache.thrift.protocol.TField TXN_ID_FIELD_DESC = new org.apache.thrift.protocol.TField("txnId", org.apache.thrift.protocol.TType.I64, (short)6);
++  private static final org.apache.thrift.protocol.TField VALID_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("validWriteIdList", org.apache.thrift.protocol.TType.STRING, (short)7);
+ 
+   private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+   static {
+     schemes.put(StandardScheme.class, new PartitionsStatsRequestStandardSchemeFactory());
+     schemes.put(TupleScheme.class, new PartitionsStatsRequestTupleSchemeFactory());
+   }
+ 
+   private String dbName; // required
+   private String tblName; // required
+   private List<String> colNames; // required
+   private List<String> partNames; // required
+   private String catName; // optional
++  private long txnId; // optional
++  private String validWriteIdList; // optional
+ 
+   /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+   public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+     DB_NAME((short)1, "dbName"),
+     TBL_NAME((short)2, "tblName"),
+     COL_NAMES((short)3, "colNames"),
+     PART_NAMES((short)4, "partNames"),
 -    CAT_NAME((short)5, "catName");
++    CAT_NAME((short)5, "catName"),
++    TXN_ID((short)6, "txnId"),
++    VALID_WRITE_ID_LIST((short)7, "validWriteIdList");
+ 
+     private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+ 
+     static {
+       for (_Fields field : EnumSet.allOf(_Fields.class)) {
+         byName.put(field.getFieldName(), field);
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, or null if it's not found.
+      */
+     public static _Fields findByThriftId(int fieldId) {
+       switch(fieldId) {
+         case 1: // DB_NAME
+           return DB_NAME;
+         case 2: // TBL_NAME
+           return TBL_NAME;
+         case 3: // COL_NAMES
+           return COL_NAMES;
+         case 4: // PART_NAMES
+           return PART_NAMES;
+         case 5: // CAT_NAME
+           return CAT_NAME;
++        case 6: // TXN_ID
++          return TXN_ID;
++        case 7: // VALID_WRITE_ID_LIST
++          return VALID_WRITE_ID_LIST;
+         default:
+           return null;
+       }
+     }
+ 
+     /**
+      * Find the _Fields constant that matches fieldId, throwing an exception
+      * if it is not found.
+      */
+     public static _Fields findByThriftIdOrThrow(int fieldId) {
+       _Fields fields = findByThriftId(fieldId);
+       if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+       return fields;
+     }
+ 
+     /**
+      * Find the _Fields constant that matches name, or null if it's not found.
+      */
+     public static _Fields findByName(String name) {
+       return byName.get(name);
+     }
+ 
+     private final short _thriftId;
+     private final String _fieldName;
+ 
+     _Fields(short thriftId, String fieldName) {
+       _thriftId = thriftId;
+       _fieldName = fieldName;
+     }
+ 
+     public short getThriftFieldId() {
+       return _thriftId;
+     }
+ 
+     public String getFieldName() {
+       return _fieldName;
+     }
+   }
+ 
+   // isset id assignments
 -  private static final _Fields optionals[] = {_Fields.CAT_NAME};
++  private static final int __TXNID_ISSET_ID = 0;
++  private byte __isset_bitfield = 0;
++  private static final _Fields optionals[] = {_Fields.CAT_NAME,_Fields.TXN_ID,_Fields.VALID_WRITE_ID_LIST};
+   public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+   static {
+     Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+     tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     tmpMap.put(_Fields.COL_NAMES, new org.apache.thrift.meta_data.FieldMetaData("colNames", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+     tmpMap.put(_Fields.PART_NAMES, new org.apache.thrift.meta_data.FieldMetaData("partNames", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+         new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+             new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+     tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+         new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
++    tmpMap.put(_Fields.TXN_ID, new org.apache.thrift.meta_data.FieldMetaData("txnId", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
++    tmpMap.put(_Fields.VALID_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("validWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
++        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+     metaDataMap = Collections.unmodifiableMap(tmpMap);
+     org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsStatsRequest.class, metaDataMap);
+   }
+ 
+   public PartitionsStatsRequest() {
++    this.txnId = -1L;
++
+   }
+ 
+   public PartitionsStatsRequest(
+     String dbName,
+     String tblName,
+     List<String> colNames,
+     List<String> partNames)
+   {
+     this();
+     this.dbName = dbName;
+     this.tblName = tblName;
+     this.colNames = colNames;
+     this.partNames = partNames;
+   }
+ 
+   /**
+    * Performs a deep copy on <i>other</i>.
+    */
+   public PartitionsStatsRequest(PartitionsStatsRequest other) {
++    __isset_bitfield = other.__isset_bitfield;
+     if (other.isSetDbName()) {
+       this.dbName = other.dbName;
+     }
+     if (other.isSetTblName()) {
+       this.tblName = other.tblName;
+     }
+     if (other.isSetColNames()) {
+       List<String> __this__colNames = new ArrayList<String>(other.colNames);
+       this.colNames = __this__colNames;
+     }
+     if (other.isSetPartNames()) {
+       List<String> __this__partNames = new ArrayList<String>(other.partNames);
+       this.partNames = __this__partNames;
+     }
+     if (other.isSetCatName()) {
+       this.catName = other.catName;
+     }
++    this.txnId = other.txnId;
++    if (other.isSetValidWriteIdList()) {
++      this.validWriteIdList = other.validWriteIdList;
++    }
+   }
+ 
+   public PartitionsStatsRequest deepCopy() {
+     return new PartitionsStatsRequest(this);
+   }
+ 
+   @Override
+   public void clear() {
+     this.dbName = null;
+     this.tblName = null;
+     this.colNames = null;
+     this.partNames = null;
+     this.catName = null;
++    this.txnId = -1L;
++
++    this.validWriteIdList = null;
+   }
+ 
+   public String getDbName() {
+     return this.dbName;
+   }
+ 
+   public void setDbName(String dbName) {
+     this.dbName = dbName;
+   }
+ 
+   public void unsetDbName() {
+     this.dbName = null;
+   }
+ 
+   /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+   public boolean isSetDbName() {
+     return this.dbName != null;
+   }
+ 
+   public void setDbNameIsSet(boolean value) {
+     if (!value) {
+       this.dbName = null;
+     }
+   }
+ 
+   public String getTblName() {
+     return this.tblName;
+   }
+ 
+   public void setTblName(String tblName) {
+     this.tblName = tblName;
+   }
+ 
+   public void unsetTblName() {
+     this.tblName = null;
+   }
+ 
+   /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+   public boolean isSetTblName() {
+     return this.tblName != null;
+   }
+ 
+   public void setTblNameIsSet(boolean value) {
+     if (!value) {
+       this.tblName = null;
+     }
+   }
+ 
+   public int getColNamesSize() {
+     return (this.colNames == null) ? 0 : this.colNames.size();
+   }
+ 
+   public java.util.Iterator<String> getColNamesIterator() {
+     return (this.colNames == null) ? null : this.colNames.iterator();
+   }
+ 
+   public void addToColNames(String elem) {
+     if (this.colNames == null) {
+       this.colNames = new ArrayList<String>();
+     }
+     this.colNames.add(elem);
+   }
+ 
+   public List<String> getColNames() {
+     return this.colNames;
+   }
+ 
+   public void setColNames(List<String> colNames) {
+     this.colNames = colNames;
+   }
+ 
+   public void unsetColNames() {
+     this.colNames = null;
+   }
+ 
+   /** Returns true if field colNames is set (has been assigned a value) and false otherwise */
+   public boolean isSetColNames() {
+     return this.colNames != null;
+   }
+ 
+   public void setColNamesIsSet(boolean value) {
+     if (!value) {
+       this.colNames = null;
+     }
+   }
+ 
+   public int getPartNamesSize() {
+     return (this.partNames == null) ? 0 : this.partNames.size();
+   }
+ 
+   public java.util.Iterator<String> getPartNamesIterator() {
+     return (this.partNames == null) ? null : this.partNames.iterator();
+   }
+ 
+   public void addToPartNames(String elem) {
+     if (this.partNames == null) {
+       this.partNames = new ArrayList<String>();
+     }
+     this.partNames.add(elem);
+   }
+ 
+   public List<String> getPartNames() {
+     return this.partNames;
+   }
+ 
+   public void setPartNames(List<String> partNames) {
+     this.partNames = partNames;
+   }
+ 
+   public void unsetPartNames() {
+     this.partNames = null;
+   }
+ 
+   /** Returns true if field partNames is set (has been assigned a value) and false otherwise */
+   public boolean isSetPartNames() {
+     return this.partNames != null;
+   }
+ 
+   public void setPartNamesIsSet(boolean value) {
+     if (!value) {
+       this.partNames = null;
+     }
+   }
+ 
+   public String getCatName() {
+     return this.catName;
+   }
+ 
+   public void setCatName(String catName) {
+     this.catName = catName;
+   }
+ 
+   public void unsetCatName() {
+     this.catName = null;
+   }
+ 
+   /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+   public boolean isSetCatName() {
+     return this.catName != null;
+   }
+ 
+   public void setCatNameIsSet(boolean value) {
+     if (!value) {
+       this.catName = null;
+     }
+   }
+ 
++  public long getTxnId() {
++    return this.txnId;
++  }
++
++  public void setTxnId(long txnId) {
++    this.txnId = txnId;
++    setTxnIdIsSet(true);
++  }
++
++  public void unsetTxnId() {
++    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  /** Returns true if field txnId is set (has been assigned a value) and false otherwise */
++  public boolean isSetTxnId() {
++    return EncodingUtils.testBit(__isset_bitfield, __TXNID_ISSET_ID);
++  }
++
++  public void setTxnIdIsSet(boolean value) {
++    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TXNID_ISSET_ID, value);
++  }
++
++  public String getValidWriteIdList() {
++    return this.validWriteIdList;
++  }
++
++  public void setValidWriteIdList(String validWriteIdList) {
++    this.validWriteIdList = validWriteIdList;
++  }
++
++  public void unsetValidWriteIdList() {
++    this.validWriteIdList = null;
++  }
++
++  /** Returns true if field validWriteIdList is set (has been assigned a value) and false otherwise */
++  public boolean isSetValidWriteIdList() {
++    return this.validWriteIdList != null;
++  }
++
++  public void setValidWriteIdListIsSet(boolean value) {
++    if (!value) {
++      this.validWriteIdList = null;
++    }
++  }
++
+   public void setFieldValue(_Fields field, Object value) {
+     switch (field) {
+     case DB_NAME:
+       if (value == null) {
+         unsetDbName();
+       } else {
+         setDbName((String)value);
+       }
+       break;
+ 
+     case TBL_NAME:
+       if (value == null) {
+         unsetTblName();
+       } else {
+         setTblName((String)value);
+       }
+       break;
+ 
+     case COL_NAMES:
+       if (value == null) {
+         unsetColNames();
+       } else {
+         setColNames((List<String>)value);
+       }
+       break;
+ 
+     case PART_NAMES:
+       if (value == null) {
+         unsetPartNames();
+       } else {
+         setPartNames((List<String>)value);
+       }
+       break;
+ 
+     case CAT_NAME:
+       if (value == null) {
+         unsetCatName();
+       } else {
+         setCatName((String)value);
+       }
+       break;
+ 
++    case TXN_ID:
++      if (value == null) {
++        unsetTxnId();
++      } else {
++        setTxnId((Long)value);
++      }
++      break;
++
++    case VALID_WRITE_ID_LIST:
++      if (value == null) {
++        unsetValidWriteIdList();
++      } else {
++        setValidWriteIdList((String)value);
++      }
++      break;
++
+     }
+   }
+ 
+   public Object getFieldValue(_Fields field) {
+     switch (field) {
+     case DB_NAME:
+       return getDbName();
+ 
+     case TBL_NAME:
+       return getTblName();
+ 
+     case COL_NAMES:
+       return getColNames();
+ 
+     case PART_NAMES:
+       return getPartNames();
+ 
+     case CAT_NAME:
+       return getCatName();
+ 
++    case TXN_ID:
++      return getTxnId();
++
++    case VALID_WRITE_ID_LIST:
++      return getValidWriteIdList();
++
+     }
+     throw new IllegalStateException();
+   }
+ 
+   /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+   public boolean isSet(_Fields field) {
+     if (field == null) {
+       throw new IllegalArgumentException();
+     }
+ 
+     switch (field) {
+     case DB_NAME:
+       return isSetDbName();
+     case TBL_NAME:
+       return isSetTblName();
+     case COL_NAMES:
+       return isSetColNames();
+     case PART_NAMES:
+       return isSetPartNames();
+     case CAT_NAME:
+       return isSetCatName();
++    case TXN_ID:
++      return isSetTxnId();
++    case VALID_WRITE_ID_LIST:
++      return isSetValidWriteIdList();
+     }
+     throw new IllegalStateException();
+   }
+ 
+   @Override
+   public boolean equals(Object that) {
+     if (that == null)
+       return false;
+     if (that instanceof PartitionsStatsRequest)
+       return this.equals((PartitionsStatsRequest)that);
+     return false;
+   }
+ 
+   public boolean equals(PartitionsStatsRequest that) {
+     if (that == null)
+       return false;
+ 
+     boolean this_present_dbName = true && this.isSetDbName();
+     boolean that_present_dbName = true && that.isSetDbName();
+     if (this_present_dbName || that_present_dbName) {
+       if (!(this_present_dbName && that_present_dbName))
+         return false;
+       if (!this.dbName.equals(that.dbName))
+         return false;
+     }
+ 
+     boolean this_present_tblName = true && this.isSetTblName();
+     boolean that_present_tblName = true && that.isSetTblName();
+     if (this_present_tblName || that_present_tblName) {
+       if (!(this_present_tblName && that_present_tblName))
+         return false;
+       if (!this.tblName.equals(that.tblName))
+         return false;
+     }
+ 
+     boolean this_present_colNames = true && this.isSetColNames();
+     boolean that_present_colNames = true && that.isSetColNames();
+     if (this_present_colNames || that_present_colNames) {
+       if (!(this_present_colNames && that_present_colNames))
+         return false;
+       if (!this.colNames.equals(that.colNames))
+         return false;
+     }
+ 
+     boolean this_present_partNames = true && this.isSetPartNames();
+     boolean that_present_partNames = true && that.isSetPartNames();
+     if (this_present_partNames || that_present_partNames) {
+       if (!(this_present_partNames && that_present_partNames))
+         return false;
+       if (!this.partNames.equals(that.partNames))
+         return false;
+     }
+ 
+     boolean this_present_catName = true && this.isSetCatName();
+     boolean that_present_catName = true && that.isSetCatName();
+     if (this_present_catName || that_present_catName) {
+       if (!(this_present_catName && that_present_catName))
+         return false;
+       if (!this.catName.equals(that.catName))
+         return false;
+     }
+ 
++    boolean this_present_txnId = true && this.isSetTxnId();
++    boolean that_present_txnId = true && that.isSetTxnId();
++    if (this_present_txnId || that_present_txnId) {
++      if (!(this_present_txnId && that_present_txnId))
++        return false;
++      if (this.txnId != that.txnId)
++        return false;
++    }
++
++    boolean this_present_validWriteIdList = true && this.isSetValidWriteIdList();
++    boolean that_present_validWriteIdList = true && that.isSetValidWriteIdList();
++    if (this_present_validWriteIdList || that_present_validWriteIdList) {
++      if (!(this_present_validWriteIdList && that_present_validWriteIdList))
++        return false;
++      if (!this.validWriteIdList.equals(that.validWriteIdList))
++        return false;
++    }
++
+     return true;
+   }
+ 
+   @Override
+   public int hashCode() {
+     List<Object> list = new ArrayList<Object>();
+ 
+     boolean present_dbName = true && (isSetDbName());
+     list.add(present_dbName);
+     if (present_dbName)
+       list.add(dbName);
+ 
+     boolean present_tblName = true && (isSetTblName());
+     list.add(present_tblName);
+     if (present_tblName)
+       list.add(tblName);
+ 
+     boolean present_colNames = true && (isSetColNames());
+     list.add(present_colNames);
+     if (present_colNames)
+       list.add(colNames);
+ 
+     boolean present_partNames = true && (isSetPartNames());
+     list.add(present_partNames);
+     if (present_partNames)
+       list.add(partNames);
+ 
+     boolean present_catName = true && (isSetCatName());
+     list.add(present_catName);
+     if (present_catName)
+       list.add(catName);
+ 
++    boolean present_txnId = true && (isSetTxnId());
++    list.add(present_txnId);
++    if (present_txnId)
++      list.add(txnId);
++
++    boolean present_validWriteIdList = true && (isSetValidWriteIdList());
++    list.add(present_validWriteIdList);
++    if (present_validWriteIdList)
++      list.add(validWriteIdList);
++
+     return list.hashCode();
+   }
+ 
+   @Override
+   public int compareTo(PartitionsStatsRequest other) {
+     if (!getClass().equals(other.getClass())) {
+       return getClass().getName().compareTo(other.getClass().getName());
+     }
+ 
+     int lastComparison = 0;
+ 
+     lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetDbName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetTblName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetColNames()).compareTo(other.isSetColNames());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetColNames()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colNames, other.colNames);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetPartNames()).compareTo(other.isSetPartNames());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetPartNames()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partNames, other.partNames);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
+     lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+     if (lastComparison != 0) {
+       return lastComparison;
+     }
+     if (isSetCatName()) {
+       lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+       if (lastComparison != 0) {
+         return lastComparison;
+       }
+     }
++    lastComparison = Boolean.valueOf(isSetTxnId()).compareTo(other.isSetTxnId());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetTxnId()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnId, other.txnId);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
++    lastComparison = Boolean.valueOf(isSetValidWriteIdList()).compareTo(other.isSetValidWriteIdList());
++    if (lastComparison != 0) {
++      return lastComparison;
++    }
++    if (isSetValidWriteIdList()) {
++      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.validWriteIdList, other.validWriteIdList);
++      if (lastComparison != 0) {
++        return lastComparison;
++      }
++    }
+     return 0;
+   }
+ 
+   public _Fields fieldForId(int fieldId) {
+     return _Fields.findByThriftId(fieldId);
+   }
+ 
+   public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+     schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+   }
+ 
+   public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+     schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+   }
+ 
+   @Override
+   public String toString() {
+     StringBuilder sb = new StringBuilder("PartitionsStatsRequest(");
+     boolean first = true;
+ 
+     sb.append("dbName:");
+     if (this.dbName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.dbName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("tblName:");
+     if (this.tblName == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.tblName);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("colNames:");
+     if (this.colNames == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.colNames);
+     }
+     first = false;
+     if (!first) sb.append(", ");
+     sb.append("partNames:");
+     if (this.partNames == null) {
+       sb.append("null");
+     } else {
+       sb.append(this.partNames);
+     }
+     first = false;
+     if (isSetCatName()) {
+       if (!first) sb.append(", ");
+       sb.append("catName:");
+       if (this.catName == null) {
+         sb.append("null");
+       } else {
+         sb.append(this.catName);
+       }
+       first = false;
+     }
++    if (isSetTxnId()) {
++      if (!first) sb.append(", ");
++      sb.append("txnId:");
++      sb.append(this.txnId);
++      first = false;
++    }
++    if (isSetValidWriteIdList()) {
++      if (!first) sb.append(", ");
++      sb.append("validWriteIdList:");
++      if (this.validWriteIdList == null) {
++        sb.append("null");
++      } else {
++        sb.append(this.validWriteIdList);
++      }
++      first = false;
++    }
+     sb.append(")");
+     return sb.toString();
+   }
+ 
+   public void validate() throws org.apache.thrift.TException {
+     // check for required fields
+     if (!isSetDbName()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetTblName()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetColNames()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'colNames' is unset! Struct:" + toString());
+     }
+ 
+     if (!isSetPartNames()) {
+       throw new org.apache.thrift.protocol.TProtocolException("Required field 'partNames' is unset! Struct:" + toString());
+     }
+ 
+     // check for sub-struct validity
+   }
+ 
+   private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+     try {
+       write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+     try {
++      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
++      __isset_bitfield = 0;
+       read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+     } catch (org.apache.thrift.TException te) {
+       throw new java.io.IOException(te);
+     }
+   }
+ 
+   private static class PartitionsStatsRequestStandardSchemeFactory implements SchemeFactory {
+     public PartitionsStatsRequestStandardScheme getScheme() {
+       return new PartitionsStatsRequestStandardScheme();
+     }
+   }
+ 
+   private static class PartitionsStatsRequestStandardScheme extends StandardScheme<PartitionsStatsRequest> {
+ 
+     public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequest struct) throws org.apache.thrift.TException {
+       org.apache.thrift.protocol.TField schemeField;
+       iprot.readStructBegin();
+       while (true)
+       {
+         schemeField = iprot.readFieldBegin();
+         if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+           break;
+         }
+         switch (schemeField.id) {
+           case 1: // DB_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.dbName = iprot.readString();
+               struct.setDbNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 2: // TBL_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.tblName = iprot.readString();
+               struct.setTblNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 3: // COL_NAMES
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list458 = iprot.readListBegin();
+                 struct.colNames = new ArrayList<String>(_list458.size);
+                 String _elem459;
+                 for (int _i460 = 0; _i460 < _list458.size; ++_i460)
+                 {
+                   _elem459 = iprot.readString();
+                   struct.colNames.add(_elem459);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setColNamesIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 4: // PART_NAMES
+             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+               {
+                 org.apache.thrift.protocol.TList _list461 = iprot.readListBegin();
+                 struct.partNames = new ArrayList<String>(_list461.size);
+                 String _elem462;
+                 for (int _i463 = 0; _i463 < _list461.size; ++_i463)
+                 {
+                   _elem462 = iprot.readString();
+                   struct.partNames.add(_elem462);
+                 }
+                 iprot.readListEnd();
+               }
+               struct.setPartNamesIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
+           case 5: // CAT_NAME
+             if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+               struct.catName = iprot.readString();
+               struct.setCatNameIsSet(true);
+             } else { 
+               org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+             }
+             break;
++          case 6: // TXN_ID
++            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
++              struct.txnId = iprot.readI64();
++              struct.setTxnIdIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
++          case 7: // VALID_WRITE_ID_LIST
++            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
++              struct.validWriteIdList = iprot.readString();
++              struct.setValidWriteIdListIsSet(true);
++            } else { 
++              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
++            }
++            break;
+           default:
+             org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+         }
+         iprot.readFieldEnd();
+       }
+       iprot.readStructEnd();
+       struct.validate();
+     }
+ 
+     public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRequest struct) throws org.apache.thrift.TException {
+       struct.validate();
+ 
+       oprot.writeStructBegin(STRUCT_DESC);
+       if (struct.dbName != null) {
+         oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+         oprot.writeString(struct.dbName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.tblName != null) {
+         oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+         oprot.writeString(struct.tblName);
+         oprot.writeFieldEnd();
+       }
+       if (struct.colNames != null) {
+         oprot.writeFieldBegin(COL_NAMES_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size()));
+           for (String _iter464 : struct.colNames)
+           {
+             oprot.writeString(_iter464);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       if (struct.partNames != null) {
+         oprot.writeFieldBegin(PART_NAMES_FIELD_DESC);
+         {
+           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size()));
+           for (String _iter465 : struct.partNames)
+           {
+             oprot.writeString(_iter465);
+           }
+           oprot.writeListEnd();
+         }
+         oprot.writeFieldEnd();
+       }
+       if (struct.catName != null) {
+         if (struct.isSetCatName()) {
+           oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+           oprot.writeString(struct.catName);
+           oprot.writeFieldEnd();
+         }
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeFieldBegin(TXN_ID_FIELD_DESC);
++        oprot.writeI64(struct.txnId);
++        oprot.writeFieldEnd();
++      }
++      if (struct.validWriteIdList != null) {
++        if (struct.isSetValidWriteIdList()) {
++          oprot.writeFieldBegin(VALID_WRITE_ID_LIST_FIELD_DESC);
++          oprot.writeString(struct.validWriteIdList);
++          oprot.writeFieldEnd();
++        }
++      }
+       oprot.writeFieldStop();
+       oprot.writeStructEnd();
+     }
+ 
+   }
+ 
+   private static class PartitionsStatsRequestTupleSchemeFactory implements SchemeFactory {
+     public PartitionsStatsRequestTupleScheme getScheme() {
+       return new PartitionsStatsRequestTupleScheme();
+     }
+   }
+ 
+   private static class PartitionsStatsRequestTupleScheme extends TupleScheme<PartitionsStatsRequest> {
+ 
+     @Override
+     public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol oprot = (TTupleProtocol) prot;
+       oprot.writeString(struct.dbName);
+       oprot.writeString(struct.tblName);
+       {
+         oprot.writeI32(struct.colNames.size());
+         for (String _iter466 : struct.colNames)
+         {
+           oprot.writeString(_iter466);
+         }
+       }
+       {
+         oprot.writeI32(struct.partNames.size());
+         for (String _iter467 : struct.partNames)
+         {
+           oprot.writeString(_iter467);
+         }
+       }
+       BitSet optionals = new BitSet();
+       if (struct.isSetCatName()) {
+         optionals.set(0);
+       }
 -      oprot.writeBitSet(optionals, 1);
++      if (struct.isSetTxnId()) {
++        optionals.set(1);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        optionals.set(2);
++      }
++      oprot.writeBitSet(optionals, 3);
+       if (struct.isSetCatName()) {
+         oprot.writeString(struct.catName);
+       }
++      if (struct.isSetTxnId()) {
++        oprot.writeI64(struct.txnId);
++      }
++      if (struct.isSetValidWriteIdList()) {
++        oprot.writeString(struct.validWriteIdList);
++      }
+     }
+ 
+     @Override
+     public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequest struct) throws org.apache.thrift.TException {
+       TTupleProtocol iprot = (TTupleProtocol) prot;
+       struct.dbName = iprot.readString();
+       struct.setDbNameIsSet(true);
+       struct.tblName = iprot.readString();
+       struct.setTblNameIsSet(true);
+       {
+         org.apache.thrift.protocol.TList _list468 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.colNames = new ArrayList<String>(_list468.size);
+         String _elem469;
+         for (int _i470 = 0; _i470 < _list468.size; ++_i470)
+         {
+           _elem469 = iprot.readString();
+           struct.colNames.add(_elem469);
+         }
+       }
+       struct.setColNamesIsSet(true);
+       {
+         org.apache.thrift.protocol.TList _list471 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+         struct.partNames = new ArrayList<String>(_list471.size);
+         String _elem472;
+         for (int _i473 = 0; _i473 < _list471.size; ++_i473)
+         {
+           _elem472 = iprot.readString();
+           struct.partNames.add(_elem472);
+         }
+       }
+       struct.setPartNamesIsSet(true);
 -      BitSet incoming = iprot.readBitSet(1);
++      BitSet incoming = iprot.readBitSet(3);
+       if (incoming.get(0)) {
+         struct.catName = iprot.readString();
+         struct.setCatNameIsSet(true);
+       }
++      if (incoming.get(1)) {
++        struct.txnId = iprot.readI64();
++        struct.setTxnIdIsSet(true);
++      }
++      if (incoming.get(2)) {
++        struct.validWriteIdList = iprot.readString();
++        struct.setValidWriteIdListIsSet(true);
++      }
+     }
+   }
+ 
+ }
+ 
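The net effect of the hunk above is to add two optional fields to PartitionsStatsRequest: txnId (i64, field id 6) and validWriteIdList (string, field id 7). Because both are optional, the tuple scheme widens its optionals BitSet from 1 bit to 3, while the standard scheme stays wire-compatible the usual Thrift way: readers that predate this change simply skip the unknown field ids 6 and 7. Below is a minimal sketch (not part of the commit) of a caller populating the widened struct, assuming only the setters visible in the generated code above; the txn id and write-id-list string values are made up for illustration.

  import java.util.Arrays;
  import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest;

  public class PartitionsStatsRequestSketch {
    public static void main(String[] args) throws Exception {
      PartitionsStatsRequest req = new PartitionsStatsRequest();
      // The four required fields; validate() throws TProtocolException if any is unset.
      req.setDbName("default");
      req.setTblName("sales");
      req.setColNames(Arrays.asList("qty", "price"));
      req.setPartNames(Arrays.asList("ds=2018-07-12"));
      // The two optional fields added by this diff (ids 6 and 7).
      req.setTxnId(42L);                                // hypothetical open transaction id
      req.setValidWriteIdList("default.sales:42:42::"); // hypothetical ValidWriteIdList encoding
      req.validate();
      System.out.println(req); // toString() prints txnId/validWriteIdList only when set
    }
  }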


[51/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/20eb7b51
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/20eb7b51
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/20eb7b51

Branch: refs/heads/master-txnstats
Commit: 20eb7b516f373f42b07f979ba03363c116adb99c
Parents: 57dd304
Author: Vihang Karajgaonkar <vi...@cloudera.com>
Authored: Thu Jul 12 18:03:02 2018 -0700
Committer: Vihang Karajgaonkar <vi...@cloudera.com>
Committed: Thu Jul 12 18:03:02 2018 -0700

----------------------------------------------------------------------
 .gitignore                                      |      1 +
 hcatalog/core/pom.xml                           |      2 +-
 hcatalog/webhcat/java-client/pom.xml            |      2 +-
 hcatalog/webhcat/svr/pom.xml                    |      2 +-
 itests/hcatalog-unit/pom.xml                    |      2 +-
 itests/hive-blobstore/pom.xml                   |      4 +-
 itests/hive-minikdc/pom.xml                     |      4 +-
 itests/hive-unit-hadoop2/pom.xml                |      2 +-
 itests/hive-unit/pom.xml                        |      4 +-
 itests/qtest-accumulo/pom.xml                   |      4 +-
 itests/qtest-spark/pom.xml                      |      4 +-
 itests/qtest/pom.xml                            |      4 +-
 itests/util/pom.xml                             |      2 +-
 llap-server/pom.xml                             |      2 +-
 metastore/pom.xml                               |      2 +-
 packaging/src/main/assembly/bin.xml             |      2 +-
 packaging/src/main/assembly/src.xml             |      2 +-
 ql/pom.xml                                      |      4 +-
 service/pom.xml                                 |      2 +-
 standalone-metastore/DEV-README                 |      2 +-
 .../findbugs/findbugs-exclude.xml               |     24 +
 standalone-metastore/metastore-common/pom.xml   |    754 +
 .../metastore-common/src/assembly/bin.xml       |    136 +
 .../metastore-common/src/assembly/src.xml       |     53 +
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |  94840 ++++++
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  30068 ++
 .../ThriftHiveMetastore_server.skeleton.cpp     |   1079 +
 .../thrift/gen-cpp/hive_metastore_constants.cpp |     67 +
 .../thrift/gen-cpp/hive_metastore_constants.h   |     49 +
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  33217 +++
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  14125 +
 .../hive/metastore/api/AbortTxnRequest.java     |    497 +
 .../hive/metastore/api/AbortTxnsRequest.java    |    438 +
 .../api/AddCheckConstraintRequest.java          |    443 +
 .../api/AddDefaultConstraintRequest.java        |    443 +
 .../metastore/api/AddDynamicPartitions.java     |    959 +
 .../metastore/api/AddForeignKeyRequest.java     |    443 +
 .../api/AddNotNullConstraintRequest.java        |    443 +
 .../metastore/api/AddPartitionsRequest.java     |    955 +
 .../hive/metastore/api/AddPartitionsResult.java |    447 +
 .../metastore/api/AddPrimaryKeyRequest.java     |    443 +
 .../api/AddUniqueConstraintRequest.java         |    443 +
 .../hadoop/hive/metastore/api/AggrStats.java    |    542 +
 .../api/AllocateTableWriteIdsRequest.java       |    915 +
 .../api/AllocateTableWriteIdsResponse.java      |    443 +
 .../metastore/api/AlreadyExistsException.java   |    395 +
 .../hive/metastore/api/AlterCatalogRequest.java |    504 +
 .../hive/metastore/api/AlterISchemaRequest.java |    509 +
 .../hadoop/hive/metastore/api/BasicTxnInfo.java |    907 +
 .../metastore/api/BinaryColumnStatsData.java    |    696 +
 .../metastore/api/BooleanColumnStatsData.java   |    696 +
 .../metastore/api/CacheFileMetadataRequest.java |    703 +
 .../metastore/api/CacheFileMetadataResult.java  |    387 +
 .../hadoop/hive/metastore/api/Catalog.java      |    606 +
 .../metastore/api/CheckConstraintsRequest.java  |    591 +
 .../metastore/api/CheckConstraintsResponse.java |    443 +
 .../hive/metastore/api/CheckLockRequest.java    |    589 +
 .../metastore/api/ClearFileMetadataRequest.java |    438 +
 .../metastore/api/ClearFileMetadataResult.java  |    283 +
 .../hive/metastore/api/ClientCapabilities.java  |    441 +
 .../hive/metastore/api/ClientCapability.java    |     45 +
 .../hive/metastore/api/CmRecycleRequest.java    |    488 +
 .../hive/metastore/api/CmRecycleResponse.java   |    283 +
 .../hive/metastore/api/ColumnStatistics.java    |    549 +
 .../metastore/api/ColumnStatisticsData.java     |    675 +
 .../metastore/api/ColumnStatisticsDesc.java     |    904 +
 .../hive/metastore/api/ColumnStatisticsObj.java |    593 +
 .../hive/metastore/api/CommitTxnRequest.java    |    657 +
 .../hive/metastore/api/CompactionRequest.java   |    977 +
 .../hive/metastore/api/CompactionResponse.java  |    583 +
 .../hive/metastore/api/CompactionType.java      |     45 +
 .../api/ConfigValSecurityException.java         |    395 +
 .../metastore/api/CreateCatalogRequest.java     |    400 +
 .../hive/metastore/api/CreationMetadata.java    |    851 +
 .../api/CurrentNotificationEventId.java         |    387 +
 .../hive/metastore/api/DataOperationType.java   |     57 +
 .../hadoop/hive/metastore/api/Database.java     |   1201 +
 .../apache/hadoop/hive/metastore/api/Date.java  |    387 +
 .../hive/metastore/api/DateColumnStatsData.java |    823 +
 .../hadoop/hive/metastore/api/Decimal.java      |    497 +
 .../metastore/api/DecimalColumnStatsData.java   |    823 +
 .../api/DefaultConstraintsRequest.java          |    591 +
 .../api/DefaultConstraintsResponse.java         |    443 +
 .../metastore/api/DoubleColumnStatsData.java    |    799 +
 .../hive/metastore/api/DropCatalogRequest.java  |    395 +
 .../metastore/api/DropConstraintRequest.java    |    701 +
 .../hive/metastore/api/DropPartitionsExpr.java  |    505 +
 .../metastore/api/DropPartitionsRequest.java    |   1218 +
 .../metastore/api/DropPartitionsResult.java     |    447 +
 .../hive/metastore/api/EnvironmentContext.java  |    447 +
 .../hive/metastore/api/EventRequestType.java    |     48 +
 .../hadoop/hive/metastore/api/FieldSchema.java  |    603 +
 .../metastore/api/FileMetadataExprType.java     |     42 +
 .../metastore/api/FindSchemasByColsResp.java    |    449 +
 .../metastore/api/FindSchemasByColsRqst.java    |    605 +
 .../hive/metastore/api/FireEventRequest.java    |    967 +
 .../metastore/api/FireEventRequestData.java     |    309 +
 .../hive/metastore/api/FireEventResponse.java   |    283 +
 .../hive/metastore/api/ForeignKeysRequest.java  |    814 +
 .../hive/metastore/api/ForeignKeysResponse.java |    443 +
 .../hadoop/hive/metastore/api/Function.java     |   1306 +
 .../hadoop/hive/metastore/api/FunctionType.java |     42 +
 .../metastore/api/GetAllFunctionsResponse.java  |    447 +
 .../hive/metastore/api/GetCatalogRequest.java   |    395 +
 .../hive/metastore/api/GetCatalogResponse.java  |    400 +
 .../hive/metastore/api/GetCatalogsResponse.java |    444 +
 .../api/GetFileMetadataByExprRequest.java       |    773 +
 .../api/GetFileMetadataByExprResult.java        |    553 +
 .../metastore/api/GetFileMetadataRequest.java   |    438 +
 .../metastore/api/GetFileMetadataResult.java    |    540 +
 .../metastore/api/GetOpenTxnsInfoResponse.java  |    542 +
 .../hive/metastore/api/GetOpenTxnsResponse.java |    750 +
 .../api/GetPrincipalsInRoleRequest.java         |    389 +
 .../api/GetPrincipalsInRoleResponse.java        |    443 +
 .../api/GetRoleGrantsForPrincipalRequest.java   |    502 +
 .../api/GetRoleGrantsForPrincipalResponse.java  |    443 +
 .../metastore/api/GetRuntimeStatsRequest.java   |    482 +
 .../hive/metastore/api/GetSerdeRequest.java     |    395 +
 .../hive/metastore/api/GetTableRequest.java     |    711 +
 .../hive/metastore/api/GetTableResult.java      |    394 +
 .../hive/metastore/api/GetTablesRequest.java    |    765 +
 .../hive/metastore/api/GetTablesResult.java     |    443 +
 .../metastore/api/GetValidWriteIdsRequest.java  |    539 +
 .../metastore/api/GetValidWriteIdsResponse.java |    443 +
 .../api/GrantRevokePrivilegeRequest.java        |    620 +
 .../api/GrantRevokePrivilegeResponse.java       |    390 +
 .../metastore/api/GrantRevokeRoleRequest.java   |   1059 +
 .../metastore/api/GrantRevokeRoleResponse.java  |    390 +
 .../hive/metastore/api/GrantRevokeType.java     |     45 +
 .../hive/metastore/api/HeartbeatRequest.java    |    489 +
 .../metastore/api/HeartbeatTxnRangeRequest.java |    482 +
 .../api/HeartbeatTxnRangeResponse.java          |    588 +
 .../hive/metastore/api/HiveObjectPrivilege.java |    833 +
 .../hive/metastore/api/HiveObjectRef.java       |    979 +
 .../hive/metastore/api/HiveObjectType.java      |     54 +
 .../hadoop/hive/metastore/api/ISchema.java      |   1266 +
 .../hadoop/hive/metastore/api/ISchemaName.java  |    603 +
 .../metastore/api/InsertEventRequestData.java   |    855 +
 .../metastore/api/InvalidInputException.java    |    395 +
 .../metastore/api/InvalidObjectException.java   |    395 +
 .../api/InvalidOperationException.java          |    395 +
 .../api/InvalidPartitionException.java          |    395 +
 .../hive/metastore/api/LockComponent.java       |   1158 +
 .../hadoop/hive/metastore/api/LockLevel.java    |     48 +
 .../hadoop/hive/metastore/api/LockRequest.java  |    861 +
 .../hadoop/hive/metastore/api/LockResponse.java |    500 +
 .../hadoop/hive/metastore/api/LockState.java    |     51 +
 .../hadoop/hive/metastore/api/LockType.java     |     48 +
 .../hive/metastore/api/LongColumnStatsData.java |    799 +
 .../api/MapSchemaVersionToSerdeRequest.java     |    504 +
 .../hive/metastore/api/Materialization.java     |    750 +
 .../hive/metastore/api/MetaException.java       |    395 +
 .../hive/metastore/api/MetadataPpdResult.java   |    517 +
 .../hive/metastore/api/NoSuchLockException.java |    395 +
 .../metastore/api/NoSuchObjectException.java    |    395 +
 .../hive/metastore/api/NoSuchTxnException.java  |    395 +
 .../api/NotNullConstraintsRequest.java          |    591 +
 .../api/NotNullConstraintsResponse.java         |    443 +
 .../hive/metastore/api/NotificationEvent.java   |   1112 +
 .../metastore/api/NotificationEventRequest.java |    490 +
 .../api/NotificationEventResponse.java          |    443 +
 .../api/NotificationEventsCountRequest.java     |    598 +
 .../api/NotificationEventsCountResponse.java    |    387 +
 .../hive/metastore/api/OpenTxnRequest.java      |    963 +
 .../hive/metastore/api/OpenTxnsResponse.java    |    438 +
 .../apache/hadoop/hive/metastore/api/Order.java |    497 +
 .../hadoop/hive/metastore/api/Partition.java    |   1335 +
 .../hive/metastore/api/PartitionEventType.java  |     42 +
 .../api/PartitionListComposingSpec.java         |    449 +
 .../hive/metastore/api/PartitionSpec.java       |    932 +
 .../api/PartitionSpecWithSharedSD.java          |    558 +
 .../metastore/api/PartitionValuesRequest.java   |   1328 +
 .../metastore/api/PartitionValuesResponse.java  |    443 +
 .../hive/metastore/api/PartitionValuesRow.java  |    438 +
 .../hive/metastore/api/PartitionWithoutSD.java  |   1016 +
 .../metastore/api/PartitionsByExprRequest.java  |    921 +
 .../metastore/api/PartitionsByExprResult.java   |    542 +
 .../metastore/api/PartitionsStatsRequest.java   |    900 +
 .../metastore/api/PartitionsStatsResult.java    |    490 +
 .../hive/metastore/api/PrimaryKeysRequest.java  |    600 +
 .../hive/metastore/api/PrimaryKeysResponse.java |    443 +
 .../metastore/api/PrincipalPrivilegeSet.java    |    906 +
 .../hive/metastore/api/PrincipalType.java       |     48 +
 .../hadoop/hive/metastore/api/PrivilegeBag.java |    449 +
 .../hive/metastore/api/PrivilegeGrantInfo.java  |    815 +
 .../metastore/api/PutFileMetadataRequest.java   |    710 +
 .../metastore/api/PutFileMetadataResult.java    |    283 +
 .../api/ReplTblWriteIdStateRequest.java         |    952 +
 .../hive/metastore/api/RequestPartsSpec.java    |    438 +
 .../hadoop/hive/metastore/api/ResourceType.java |     48 +
 .../hadoop/hive/metastore/api/ResourceUri.java  |    511 +
 .../apache/hadoop/hive/metastore/api/Role.java  |    601 +
 .../hive/metastore/api/RolePrincipalGrant.java  |   1035 +
 .../hadoop/hive/metastore/api/RuntimeStat.java  |    600 +
 .../hive/metastore/api/SQLCheckConstraint.java  |   1213 +
 .../metastore/api/SQLDefaultConstraint.java     |   1213 +
 .../hive/metastore/api/SQLForeignKey.java       |   1822 +
 .../metastore/api/SQLNotNullConstraint.java     |   1109 +
 .../hive/metastore/api/SQLPrimaryKey.java       |   1210 +
 .../hive/metastore/api/SQLUniqueConstraint.java |   1207 +
 .../hadoop/hive/metastore/api/Schema.java       |    605 +
 .../hive/metastore/api/SchemaCompatibility.java |     51 +
 .../hadoop/hive/metastore/api/SchemaType.java   |     45 +
 .../hive/metastore/api/SchemaValidation.java    |     45 +
 .../hive/metastore/api/SchemaVersion.java       |   1412 +
 .../metastore/api/SchemaVersionDescriptor.java  |    502 +
 .../hive/metastore/api/SchemaVersionState.java  |     63 +
 .../hadoop/hive/metastore/api/SerDeInfo.java    |   1092 +
 .../hadoop/hive/metastore/api/SerdeType.java    |     45 +
 .../api/SetPartitionsStatsRequest.java          |    550 +
 .../api/SetSchemaVersionStateRequest.java       |    516 +
 .../hive/metastore/api/ShowCompactRequest.java  |    283 +
 .../hive/metastore/api/ShowCompactResponse.java |    443 +
 .../api/ShowCompactResponseElement.java         |   1641 +
 .../hive/metastore/api/ShowLocksRequest.java    |    710 +
 .../hive/metastore/api/ShowLocksResponse.java   |    449 +
 .../metastore/api/ShowLocksResponseElement.java |   1929 +
 .../hadoop/hive/metastore/api/SkewedInfo.java   |    834 +
 .../hive/metastore/api/StorageDescriptor.java   |   1748 +
 .../metastore/api/StringColumnStatsData.java    |    791 +
 .../apache/hadoop/hive/metastore/api/Table.java |   2283 +
 .../hadoop/hive/metastore/api/TableMeta.java    |    807 +
 .../hive/metastore/api/TableStatsRequest.java   |    750 +
 .../hive/metastore/api/TableStatsResult.java    |    443 +
 .../hive/metastore/api/TableValidWriteIds.java  |    851 +
 .../hive/metastore/api/ThriftHiveMetastore.java | 239288 ++++++++++++++++
 .../hive/metastore/api/TxnAbortedException.java |    395 +
 .../hadoop/hive/metastore/api/TxnInfo.java      |   1220 +
 .../hive/metastore/api/TxnOpenException.java    |    395 +
 .../hadoop/hive/metastore/api/TxnState.java     |     48 +
 .../hadoop/hive/metastore/api/TxnToWriteId.java |    482 +
 .../apache/hadoop/hive/metastore/api/Type.java  |    768 +
 .../metastore/api/UniqueConstraintsRequest.java |    591 +
 .../api/UniqueConstraintsResponse.java          |    443 +
 .../hive/metastore/api/UnknownDBException.java  |    395 +
 .../api/UnknownPartitionException.java          |    395 +
 .../metastore/api/UnknownTableException.java    |    395 +
 .../hive/metastore/api/UnlockRequest.java       |    387 +
 .../hadoop/hive/metastore/api/Version.java      |    499 +
 .../hive/metastore/api/WMAlterPoolRequest.java  |    504 +
 .../hive/metastore/api/WMAlterPoolResponse.java |    283 +
 .../api/WMAlterResourcePlanRequest.java         |    805 +
 .../api/WMAlterResourcePlanResponse.java        |    398 +
 .../metastore/api/WMAlterTriggerRequest.java    |    398 +
 .../metastore/api/WMAlterTriggerResponse.java   |    283 +
 ...CreateOrDropTriggerToPoolMappingRequest.java |    708 +
 ...reateOrDropTriggerToPoolMappingResponse.java |    283 +
 .../api/WMCreateOrUpdateMappingRequest.java     |    501 +
 .../api/WMCreateOrUpdateMappingResponse.java    |    283 +
 .../hive/metastore/api/WMCreatePoolRequest.java |    398 +
 .../metastore/api/WMCreatePoolResponse.java     |    283 +
 .../api/WMCreateResourcePlanRequest.java        |    504 +
 .../api/WMCreateResourcePlanResponse.java       |    283 +
 .../metastore/api/WMCreateTriggerRequest.java   |    398 +
 .../metastore/api/WMCreateTriggerResponse.java  |    283 +
 .../metastore/api/WMDropMappingRequest.java     |    398 +
 .../metastore/api/WMDropMappingResponse.java    |    283 +
 .../hive/metastore/api/WMDropPoolRequest.java   |    499 +
 .../hive/metastore/api/WMDropPoolResponse.java  |    283 +
 .../api/WMDropResourcePlanRequest.java          |    393 +
 .../api/WMDropResourcePlanResponse.java         |    283 +
 .../metastore/api/WMDropTriggerRequest.java     |    499 +
 .../metastore/api/WMDropTriggerResponse.java    |    283 +
 .../hive/metastore/api/WMFullResourcePlan.java  |   1033 +
 .../api/WMGetActiveResourcePlanRequest.java     |    283 +
 .../api/WMGetActiveResourcePlanResponse.java    |    398 +
 .../api/WMGetAllResourcePlanRequest.java        |    283 +
 .../api/WMGetAllResourcePlanResponse.java       |    447 +
 .../metastore/api/WMGetResourcePlanRequest.java |    393 +
 .../api/WMGetResourcePlanResponse.java          |    398 +
 .../api/WMGetTriggersForResourePlanRequest.java |    393 +
 .../WMGetTriggersForResourePlanResponse.java    |    447 +
 .../hadoop/hive/metastore/api/WMMapping.java    |    804 +
 .../hive/metastore/api/WMNullablePool.java      |    901 +
 .../metastore/api/WMNullableResourcePlan.java   |    918 +
 .../hadoop/hive/metastore/api/WMPool.java       |    802 +
 .../metastore/api/WMPoolSchedulingPolicy.java   |     45 +
 .../hive/metastore/api/WMPoolTrigger.java       |    490 +
 .../hive/metastore/api/WMResourcePlan.java      |    720 +
 .../metastore/api/WMResourcePlanStatus.java     |     48 +
 .../hadoop/hive/metastore/api/WMTrigger.java    |    809 +
 .../api/WMValidateResourcePlanRequest.java      |    393 +
 .../api/WMValidateResourcePlanResponse.java     |    597 +
 .../hive/metastore/api/WriteEventInfo.java      |   1012 +
 .../api/WriteNotificationLogRequest.java        |    949 +
 .../api/WriteNotificationLogResponse.java       |    283 +
 .../metastore/api/hive_metastoreConstants.java  |     89 +
 .../gen-php/metastore/ThriftHiveMetastore.php   |  59951 ++++
 .../src/gen/thrift/gen-php/metastore/Types.php  |  32146 +++
 .../src/gen/thrift/gen-py/__init__.py           |      0
 .../hive_metastore/ThriftHiveMetastore-remote   |   1634 +
 .../hive_metastore/ThriftHiveMetastore.py       |  48956 ++++
 .../thrift/gen-py/hive_metastore/__init__.py    |      1 +
 .../thrift/gen-py/hive_metastore/constants.py   |     36 +
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  23076 ++
 .../thrift/gen-rb/hive_metastore_constants.rb   |     59 +
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   5322 +
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |  13779 +
 .../hadoop/hive/common/StatsSetupConst.java     |    335 +
 .../common/classification/RetrySemantics.java   |     57 +
 .../common/ndv/NumDistinctValueEstimator.java   |     51 +
 .../ndv/NumDistinctValueEstimatorFactory.java   |     75 +
 .../hadoop/hive/common/ndv/fm/FMSketch.java     |    359 +
 .../hive/common/ndv/fm/FMSketchUtils.java       |    132 +
 .../hive/common/ndv/hll/HLLConstants.java       |    933 +
 .../hive/common/ndv/hll/HLLDenseRegister.java   |    202 +
 .../hadoop/hive/common/ndv/hll/HLLRegister.java |     50 +
 .../hive/common/ndv/hll/HLLSparseRegister.java  |    261 +
 .../hadoop/hive/common/ndv/hll/HyperLogLog.java |    664 +
 .../hive/common/ndv/hll/HyperLogLogUtils.java   |    409 +
 .../hive/metastore/AcidEventListener.java       |    146 +
 .../hive/metastore/AggregateStatsCache.java     |    571 +
 .../hadoop/hive/metastore/AlterHandler.java     |    202 +
 .../apache/hadoop/hive/metastore/Batchable.java |     86 +
 .../hadoop/hive/metastore/ColumnType.java       |    301 +
 .../hadoop/hive/metastore/DatabaseProduct.java  |     75 +
 .../apache/hadoop/hive/metastore/Deadline.java  |    172 +
 .../hive/metastore/DeadlineException.java       |     29 +
 .../hive/metastore/DefaultHiveMetaHook.java     |     51 +
 .../DefaultMetaStoreFilterHookImpl.java         |     93 +
 .../DefaultPartitionExpressionProxy.java        |     57 +
 .../metastore/DefaultStorageSchemaReader.java   |     38 +
 .../hadoop/hive/metastore/FileFormatProxy.java  |     64 +
 .../hive/metastore/FileMetadataHandler.java     |    109 +
 .../hive/metastore/FileMetadataManager.java     |    119 +
 .../hive/metastore/HMSMetricsListener.java      |     90 +
 .../hadoop/hive/metastore/HiveAlterHandler.java |    948 +
 .../hive/metastore/HiveMetaException.java       |     42 +
 .../hadoop/hive/metastore/HiveMetaHook.java     |    122 +
 .../hive/metastore/HiveMetaHookLoader.java      |     39 +
 .../hadoop/hive/metastore/HiveMetaStore.java    |   9354 +
 .../hive/metastore/HiveMetaStoreClient.java     |   3326 +
 .../hive/metastore/HiveMetaStoreFsImpl.java     |     55 +
 .../hive/metastore/IExtrapolatePartStatus.java  |     85 +
 .../hadoop/hive/metastore/IHMSHandler.java      |    104 +
 .../hadoop/hive/metastore/IMetaStoreClient.java |   3699 +
 .../hive/metastore/IMetaStoreSchemaInfo.java    |    115 +
 .../metastore/LinearExtrapolatePartStatus.java  |    106 +
 .../hive/metastore/LockComponentBuilder.java    |    121 +
 .../hive/metastore/LockRequestBuilder.java      |    168 +
 .../MaterializationsCacheCleanerTask.java       |     63 +
 .../MaterializationsInvalidationCache.java      |    543 +
 .../MaterializationsRebuildLockCleanerTask.java |     61 +
 .../MaterializationsRebuildLockHandler.java     |    216 +
 .../hive/metastore/MetaStoreDirectSql.java      |   2817 +
 .../metastore/MetaStoreEndFunctionContext.java  |     59 +
 .../metastore/MetaStoreEndFunctionListener.java |     58 +
 .../hive/metastore/MetaStoreEventListener.java  |    306 +
 .../MetaStoreEventListenerConstants.java        |     41 +
 .../hadoop/hive/metastore/MetaStoreFS.java      |     43 +
 .../hive/metastore/MetaStoreFilterHook.java     |    147 +
 .../hadoop/hive/metastore/MetaStoreInit.java    |    109 +
 .../hive/metastore/MetaStoreInitContext.java    |     27 +
 .../hive/metastore/MetaStoreInitListener.java   |     49 +
 .../metastore/MetaStoreListenerNotifier.java    |    375 +
 .../metastore/MetaStorePreEventListener.java    |     57 +
 .../hive/metastore/MetaStoreSchemaInfo.java     |    246 +
 .../metastore/MetaStoreSchemaInfoFactory.java   |     64 +
 .../hadoop/hive/metastore/MetaStoreThread.java  |     58 +
 .../hadoop/hive/metastore/MetadataStore.java    |     52 +
 .../hive/metastore/MetastoreTaskThread.java     |     38 +
 .../hadoop/hive/metastore/ObjectStore.java      |  12219 +
 .../hive/metastore/PartFilterExprUtil.java      |    165 +
 .../hive/metastore/PartitionDropOptions.java    |     54 +
 .../metastore/PartitionExpressionProxy.java     |     73 +
 .../apache/hadoop/hive/metastore/RawStore.java  |   1682 +
 .../hadoop/hive/metastore/RawStoreProxy.java    |    114 +
 .../hive/metastore/ReplChangeManager.java       |    501 +
 .../hive/metastore/RetryingHMSHandler.java      |    232 +
 .../hive/metastore/RetryingMetaStoreClient.java |    341 +
 .../hive/metastore/RuntimeStatsCleanerTask.java |     66 +
 .../metastore/SessionPropertiesListener.java    |     46 +
 .../hive/metastore/StatObjectConverter.java     |    892 +
 .../hive/metastore/StorageSchemaReader.java     |     46 +
 .../hive/metastore/TServerSocketKeepAlive.java  |     47 +
 .../hive/metastore/TSetIpAddressProcessor.java  |     62 +
 .../hive/metastore/TUGIBasedProcessor.java      |    183 +
 .../apache/hadoop/hive/metastore/TableType.java |     26 +
 .../hadoop/hive/metastore/ThreadPool.java       |     63 +
 .../TransactionalMetaStoreEventListener.java    |     39 +
 .../TransactionalValidationListener.java        |    487 +
 .../apache/hadoop/hive/metastore/Warehouse.java |    756 +
 .../annotation/MetastoreVersionAnnotation.java  |     85 +
 .../hive/metastore/annotation/NoReconnect.java  |     29 +
 .../api/InitializeTableWriteIdsRequest.java     |     42 +
 .../hive/metastore/api/utils/DecimalUtils.java  |     49 +
 .../hive/metastore/cache/ByteArrayWrapper.java  |     45 +
 .../hadoop/hive/metastore/cache/CacheUtils.java |    136 +
 .../hive/metastore/cache/CachedStore.java       |   2532 +
 .../hive/metastore/cache/SharedCache.java       |   1650 +
 .../client/builder/CatalogBuilder.java          |     62 +
 .../client/builder/ConstraintBuilder.java       |    115 +
 .../client/builder/DatabaseBuilder.java         |    122 +
 .../client/builder/FunctionBuilder.java         |    143 +
 .../GrantRevokePrivilegeRequestBuilder.java     |     63 +
 .../builder/HiveObjectPrivilegeBuilder.java     |     69 +
 .../client/builder/HiveObjectRefBuilder.java    |     69 +
 .../client/builder/ISchemaBuilder.java          |    102 +
 .../client/builder/PartitionBuilder.java        |    119 +
 .../builder/PrivilegeGrantInfoBuilder.java      |     84 +
 .../metastore/client/builder/RoleBuilder.java   |     55 +
 .../builder/SQLCheckConstraintBuilder.java      |     51 +
 .../builder/SQLDefaultConstraintBuilder.java    |     51 +
 .../client/builder/SQLForeignKeyBuilder.java    |    103 +
 .../builder/SQLNotNullConstraintBuilder.java    |     52 +
 .../client/builder/SQLPrimaryKeyBuilder.java    |     52 +
 .../builder/SQLUniqueConstraintBuilder.java     |     46 +
 .../client/builder/SchemaVersionBuilder.java    |    114 +
 .../client/builder/SerdeAndColsBuilder.java     |    124 +
 .../builder/StorageDescriptorBuilder.java       |    163 +
 .../metastore/client/builder/TableBuilder.java  |    224 +
 .../aggr/BinaryColumnStatsAggregator.java       |     61 +
 .../aggr/BooleanColumnStatsAggregator.java      |     62 +
 .../columnstats/aggr/ColumnStatsAggregator.java |     35 +
 .../aggr/ColumnStatsAggregatorFactory.java      |    113 +
 .../aggr/DateColumnStatsAggregator.java         |    360 +
 .../aggr/DecimalColumnStatsAggregator.java      |    375 +
 .../aggr/DoubleColumnStatsAggregator.java       |    348 +
 .../aggr/IExtrapolatePartStatus.java            |     47 +
 .../aggr/LongColumnStatsAggregator.java         |    348 +
 .../aggr/StringColumnStatsAggregator.java       |    304 +
 .../cache/DateColumnStatsDataInspector.java     |    124 +
 .../cache/DecimalColumnStatsDataInspector.java  |    124 +
 .../cache/DoubleColumnStatsDataInspector.java   |    124 +
 .../cache/LongColumnStatsDataInspector.java     |    124 +
 .../cache/StringColumnStatsDataInspector.java   |    125 +
 .../merge/BinaryColumnStatsMerger.java          |     35 +
 .../merge/BooleanColumnStatsMerger.java         |     35 +
 .../columnstats/merge/ColumnStatsMerger.java    |     31 +
 .../merge/ColumnStatsMergerFactory.java         |    120 +
 .../merge/DateColumnStatsMerger.java            |     59 +
 .../merge/DecimalColumnStatsMerger.java         |     85 +
 .../merge/DoubleColumnStatsMerger.java          |     54 +
 .../merge/LongColumnStatsMerger.java            |     54 +
 .../merge/StringColumnStatsMerger.java          |     54 +
 .../metastore/conf/ConfTemplatePrinter.java     |    150 +
 .../hive/metastore/conf/EnumValidator.java      |     26 +
 .../hive/metastore/conf/MetastoreConf.java      |   1688 +
 .../hive/metastore/conf/RangeValidator.java     |     38 +
 .../hive/metastore/conf/SizeValidator.java      |    110 +
 .../hive/metastore/conf/StringSetValidator.java |     51 +
 .../hive/metastore/conf/TimeValidator.java      |     67 +
 .../hadoop/hive/metastore/conf/Validator.java   |     87 +
 .../datasource/BoneCPDataSourceProvider.java    |     87 +
 .../datasource/DataSourceProvider.java          |     79 +
 .../datasource/DataSourceProviderFactory.java   |     66 +
 .../datasource/DbCPDataSourceProvider.java      |    117 +
 .../datasource/HikariCPDataSourceProvider.java  |     89 +
 .../hive/metastore/datasource/package-info.java |     23 +
 .../hive/metastore/events/AbortTxnEvent.java    |     51 +
 .../hive/metastore/events/AcidWriteEvent.java   |     91 +
 .../metastore/events/AddForeignKeyEvent.java    |     41 +
 .../events/AddNotNullConstraintEvent.java       |     42 +
 .../metastore/events/AddPartitionEvent.java     |     84 +
 .../metastore/events/AddPrimaryKeyEvent.java    |     42 +
 .../metastore/events/AddSchemaVersionEvent.java |     40 +
 .../events/AddUniqueConstraintEvent.java        |     42 +
 .../metastore/events/AllocWriteIdEvent.java     |     57 +
 .../metastore/events/AlterCatalogEvent.java     |     44 +
 .../metastore/events/AlterDatabaseEvent.java    |     56 +
 .../metastore/events/AlterISchemaEvent.java     |     45 +
 .../metastore/events/AlterPartitionEvent.java   |     75 +
 .../events/AlterSchemaVersionEvent.java         |     46 +
 .../hive/metastore/events/AlterTableEvent.java  |     63 +
 .../hive/metastore/events/CommitTxnEvent.java   |     51 +
 .../metastore/events/ConfigChangeEvent.java     |     52 +
 .../metastore/events/CreateCatalogEvent.java    |     39 +
 .../metastore/events/CreateDatabaseEvent.java   |     43 +
 .../metastore/events/CreateFunctionEvent.java   |     43 +
 .../metastore/events/CreateISchemaEvent.java    |     39 +
 .../hive/metastore/events/CreateTableEvent.java |     43 +
 .../hive/metastore/events/DropCatalogEvent.java |     39 +
 .../metastore/events/DropConstraintEvent.java   |     57 +
 .../metastore/events/DropDatabaseEvent.java     |     43 +
 .../metastore/events/DropFunctionEvent.java     |     43 +
 .../hive/metastore/events/DropISchemaEvent.java |     39 +
 .../metastore/events/DropPartitionEvent.java    |     70 +
 .../events/DropSchemaVersionEvent.java          |     40 +
 .../hive/metastore/events/DropTableEvent.java   |     54 +
 .../hive/metastore/events/EventCleanerTask.java |     66 +
 .../hive/metastore/events/InsertEvent.java      |    132 +
 .../hive/metastore/events/ListenerEvent.java    |    187 +
 .../events/LoadPartitionDoneEvent.java          |     57 +
 .../hive/metastore/events/OpenTxnEvent.java     |     51 +
 .../metastore/events/PreAddPartitionEvent.java  |     79 +
 .../events/PreAddSchemaVersionEvent.java        |     39 +
 .../metastore/events/PreAlterCatalogEvent.java  |     40 +
 .../metastore/events/PreAlterDatabaseEvent.java |     47 +
 .../metastore/events/PreAlterISchemaEvent.java  |     44 +
 .../events/PreAlterPartitionEvent.java          |     65 +
 .../events/PreAlterSchemaVersionEvent.java      |     45 +
 .../metastore/events/PreAlterTableEvent.java    |     53 +
 .../events/PreAuthorizationCallEvent.java       |     33 +
 .../metastore/events/PreCreateCatalogEvent.java |     39 +
 .../events/PreCreateDatabaseEvent.java          |     43 +
 .../metastore/events/PreCreateISchemaEvent.java |     39 +
 .../metastore/events/PreCreateTableEvent.java   |     43 +
 .../metastore/events/PreDropCatalogEvent.java   |     39 +
 .../metastore/events/PreDropDatabaseEvent.java  |     43 +
 .../metastore/events/PreDropISchemaEvent.java   |     39 +
 .../metastore/events/PreDropPartitionEvent.java |     67 +
 .../events/PreDropSchemaVersionEvent.java       |     39 +
 .../metastore/events/PreDropTableEvent.java     |     55 +
 .../hive/metastore/events/PreEventContext.java  |     82 +
 .../events/PreLoadPartitionDoneEvent.java       |     64 +
 .../metastore/events/PreReadCatalogEvent.java   |     39 +
 .../metastore/events/PreReadDatabaseEvent.java  |     46 +
 .../metastore/events/PreReadISchemaEvent.java   |     39 +
 .../metastore/events/PreReadTableEvent.java     |     47 +
 .../events/PreReadhSchemaVersionEvent.java      |     36 +
 .../metastore/hooks/JDOConnectionURLHook.java   |     52 +
 .../hive/metastore/hooks/URIResolverHook.java   |     37 +
 .../metastore/messaging/AbortTxnMessage.java    |     36 +
 .../metastore/messaging/AcidWriteMessage.java   |     50 +
 .../messaging/AddForeignKeyMessage.java         |     36 +
 .../messaging/AddNotNullConstraintMessage.java  |     36 +
 .../messaging/AddPartitionMessage.java          |     68 +
 .../messaging/AddPrimaryKeyMessage.java         |     35 +
 .../messaging/AddUniqueConstraintMessage.java   |     36 +
 .../messaging/AllocWriteIdMessage.java          |     36 +
 .../messaging/AlterCatalogMessage.java          |     29 +
 .../messaging/AlterDatabaseMessage.java         |     36 +
 .../messaging/AlterPartitionMessage.java        |     69 +
 .../metastore/messaging/AlterTableMessage.java  |     58 +
 .../metastore/messaging/CommitTxnMessage.java   |     59 +
 .../messaging/CreateCatalogMessage.java         |     25 +
 .../messaging/CreateDatabaseMessage.java        |     31 +
 .../messaging/CreateFunctionMessage.java        |     46 +
 .../metastore/messaging/CreateTableMessage.java |     53 +
 .../metastore/messaging/DropCatalogMessage.java |     25 +
 .../messaging/DropConstraintMessage.java        |     29 +
 .../messaging/DropDatabaseMessage.java          |     27 +
 .../messaging/DropFunctionMessage.java          |     38 +
 .../messaging/DropPartitionMessage.java         |     49 +
 .../metastore/messaging/DropTableMessage.java   |     46 +
 .../hive/metastore/messaging/EventMessage.java  |    127 +
 .../hive/metastore/messaging/EventUtils.java    |    202 +
 .../hive/metastore/messaging/InsertMessage.java |     75 +
 .../messaging/MessageDeserializer.java          |    200 +
 .../metastore/messaging/MessageFactory.java     |    341 +
 .../metastore/messaging/OpenTxnMessage.java     |     38 +
 .../metastore/messaging/PartitionFiles.java     |     53 +
 .../messaging/event/filters/AndFilter.java      |     39 +
 .../messaging/event/filters/BasicFilter.java    |     33 +
 .../event/filters/DatabaseAndTableFilter.java   |     65 +
 .../event/filters/EventBoundaryFilter.java      |     34 +
 .../event/filters/MessageFormatFilter.java      |     36 +
 .../messaging/json/JSONAbortTxnMessage.java     |     88 +
 .../messaging/json/JSONAcidWriteMessage.java    |    150 +
 .../json/JSONAddForeignKeyMessage.java          |    102 +
 .../json/JSONAddNotNullConstraintMessage.java   |     97 +
 .../messaging/json/JSONAddPartitionMessage.java |    175 +
 .../json/JSONAddPrimaryKeyMessage.java          |    102 +
 .../json/JSONAddUniqueConstraintMessage.java    |     99 +
 .../messaging/json/JSONAllocWriteIdMessage.java |    113 +
 .../messaging/json/JSONAlterCatalogMessage.java |     90 +
 .../json/JSONAlterDatabaseMessage.java          |     97 +
 .../json/JSONAlterPartitionMessage.java         |    153 +
 .../messaging/json/JSONAlterTableMessage.java   |    128 +
 .../messaging/json/JSONCommitTxnMessage.java    |    183 +
 .../json/JSONCreateCatalogMessage.java          |     80 +
 .../json/JSONCreateDatabaseMessage.java         |     85 +
 .../json/JSONCreateFunctionMessage.java         |     87 +
 .../messaging/json/JSONCreateTableMessage.java  |    134 +
 .../messaging/json/JSONDropCatalogMessage.java  |     67 +
 .../json/JSONDropConstraintMessage.java         |     91 +
 .../messaging/json/JSONDropDatabaseMessage.java |     72 +
 .../messaging/json/JSONDropFunctionMessage.java |     79 +
 .../json/JSONDropPartitionMessage.java          |    135 +
 .../messaging/json/JSONDropTableMessage.java    |    121 +
 .../messaging/json/JSONInsertMessage.java       |    148 +
 .../messaging/json/JSONMessageDeserializer.java |    273 +
 .../messaging/json/JSONMessageFactory.java      |    402 +
 .../messaging/json/JSONOpenTxnMessage.java      |    106 +
 .../hive/metastore/metrics/JsonReporter.java    |    223 +
 .../hive/metastore/metrics/JvmPauseMonitor.java |    222 +
 .../hadoop/hive/metastore/metrics/Metrics.java  |    244 +
 .../metastore/metrics/MetricsConstants.java     |     46 +
 .../hive/metastore/metrics/PerfLogger.java      |    194 +
 .../hadoop/hive/metastore/model/MCatalog.java   |     58 +
 .../hive/metastore/model/MColumnDescriptor.java |     51 +
 .../hive/metastore/model/MConstraint.java       |    214 +
 .../hive/metastore/model/MCreationMetadata.java |     87 +
 .../hive/metastore/model/MDBPrivilege.java      |    142 +
 .../hadoop/hive/metastore/model/MDatabase.java  |    157 +
 .../hive/metastore/model/MDelegationToken.java  |     45 +
 .../hive/metastore/model/MFieldSchema.java      |     80 +
 .../hadoop/hive/metastore/model/MFunction.java  |    119 +
 .../hive/metastore/model/MGlobalPrivilege.java  |    130 +
 .../hadoop/hive/metastore/model/MISchema.java   |    107 +
 .../hadoop/hive/metastore/model/MIndex.java     |    200 +
 .../hadoop/hive/metastore/model/MMasterKey.java |     55 +
 .../metastore/model/MMetastoreDBProperties.java |     56 +
 .../hive/metastore/model/MNotificationLog.java  |    108 +
 .../metastore/model/MNotificationNextId.java    |     42 +
 .../hadoop/hive/metastore/model/MOrder.java     |     62 +
 .../hadoop/hive/metastore/model/MPartition.java |    155 +
 .../model/MPartitionColumnPrivilege.java        |    171 +
 .../model/MPartitionColumnStatistics.java       |    281 +
 .../hive/metastore/model/MPartitionEvent.java   |     97 +
 .../metastore/model/MPartitionPrivilege.java    |    149 +
 .../hive/metastore/model/MPrincipalDesc.java    |     59 +
 .../hive/metastore/model/MResourceUri.java      |     49 +
 .../hadoop/hive/metastore/model/MRole.java      |     80 +
 .../hadoop/hive/metastore/model/MRoleMap.java   |    120 +
 .../hive/metastore/model/MRuntimeStat.java      |     59 +
 .../hive/metastore/model/MSchemaVersion.java    |    127 +
 .../hadoop/hive/metastore/model/MSerDeInfo.java |    127 +
 .../metastore/model/MStorageDescriptor.java     |    277 +
 .../hive/metastore/model/MStringList.java       |     62 +
 .../hadoop/hive/metastore/model/MTable.java     |    273 +
 .../metastore/model/MTableColumnPrivilege.java  |    170 +
 .../metastore/model/MTableColumnStatistics.java |    272 +
 .../hive/metastore/model/MTablePrivilege.java   |    149 +
 .../model/MTxnWriteNotificationLog.java         |    123 +
 .../hadoop/hive/metastore/model/MType.java      |    105 +
 .../hive/metastore/model/MVersionTable.java     |     57 +
 .../hadoop/hive/metastore/model/MWMMapping.java |     83 +
 .../hadoop/hive/metastore/model/MWMPool.java    |     89 +
 .../hive/metastore/model/MWMResourcePlan.java   |    105 +
 .../hadoop/hive/metastore/model/MWMTrigger.java |     89 +
 .../hive/metastore/parser/ExpressionTree.java   |    606 +
 .../hadoop/hive/metastore/parser/Filter.g       |    486 +
 .../hive/metastore/parser/package-info.java     |     23 +
 .../spec/CompositePartitionSpecProxy.java       |    258 +
 .../spec/PartitionListComposingSpecProxy.java   |    209 +
 .../partition/spec/PartitionSpecProxy.java      |    220 +
 .../spec/PartitionSpecWithSharedSDProxy.java    |    192 +
 .../hive/metastore/security/DBTokenStore.java   |    180 +
 .../security/DelegationTokenIdentifier.java     |     52 +
 .../security/DelegationTokenSecretManager.java  |    134 +
 .../security/DelegationTokenSelector.java       |     33 +
 .../security/DelegationTokenStore.java          |    116 +
 .../metastore/security/DelegationTokenTool.java |    252 +
 .../security/HadoopThriftAuthBridge.java        |    700 +
 .../security/HadoopThriftAuthBridge23.java      |    114 +
 .../metastore/security/MemoryTokenStore.java    |    118 +
 .../MetastoreDelegationTokenManager.java        |    180 +
 .../metastore/security/TFilterTransport.java    |     99 +
 .../security/TUGIAssumingTransport.java         |     73 +
 .../security/TUGIContainingTransport.java       |     96 +
 .../TokenStoreDelegationTokenSecretManager.java |    334 +
 .../metastore/security/ZooKeeperTokenStore.java |    474 +
 .../hive/metastore/tools/HiveMetaTool.java      |    490 +
 .../hive/metastore/tools/HiveSchemaHelper.java  |    673 +
 .../metastore/tools/MetastoreSchemaTool.java    |    460 +
 .../hive/metastore/tools/SQLGenerator.java      |    187 +
 .../metastore/tools/SchemaToolCommandLine.java  |    308 +
 .../hive/metastore/tools/SchemaToolTask.java    |     32 +
 .../tools/SchemaToolTaskAlterCatalog.java       |     90 +
 .../tools/SchemaToolTaskCreateCatalog.java      |    132 +
 .../tools/SchemaToolTaskCreateUser.java         |    115 +
 .../metastore/tools/SchemaToolTaskInfo.java     |     43 +
 .../metastore/tools/SchemaToolTaskInit.java     |     73 +
 .../tools/SchemaToolTaskMoveDatabase.java       |     96 +
 .../tools/SchemaToolTaskMoveTable.java          |    142 +
 .../metastore/tools/SchemaToolTaskUpgrade.java  |    116 +
 .../metastore/tools/SchemaToolTaskValidate.java |    630 +
 .../hadoop/hive/metastore/tools/SmokeTest.java  |    102 +
 .../txn/AcidCompactionHistoryService.java       |     71 +
 .../metastore/txn/AcidHouseKeeperService.java   |     71 +
 .../txn/AcidOpenTxnsCounterService.java         |     72 +
 .../hive/metastore/txn/AcidWriteSetService.java |     69 +
 .../hive/metastore/txn/CompactionInfo.java      |    170 +
 .../metastore/txn/CompactionTxnHandler.java     |   1107 +
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |    505 +
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   4906 +
 .../hadoop/hive/metastore/txn/TxnStore.java     |    490 +
 .../hadoop/hive/metastore/txn/TxnUtils.java     |    471 +
 .../hive/metastore/utils/CommonCliOptions.java  |    160 +
 .../hadoop/hive/metastore/utils/FileUtils.java  |    537 +
 .../hadoop/hive/metastore/utils/HdfsUtils.java  |    395 +
 .../metastore/utils/HiveStrictManagedUtils.java |    100 +
 .../hadoop/hive/metastore/utils/JavaUtils.java  |    130 +
 .../hadoop/hive/metastore/utils/LogUtils.java   |    140 +
 .../hive/metastore/utils/MetaStoreUtils.java    |   1840 +
 .../metastore/utils/MetastoreVersionInfo.java   |    133 +
 .../hadoop/hive/metastore/utils/ObjectPair.java |     86 +
 .../hive/metastore/utils/SecurityUtils.java     |    313 +
 .../hive/metastore/utils/StringUtils.java       |    130 +
 .../hive/metastore/utils/StringableMap.java     |     80 +
 .../MetastoreDelegationTokenSupport.java        |     68 +
 .../hadoop/hive/metastore/metastore.proto       |     29 +
 .../main/resources/datanucleus-log4j.properties |     17 +
 .../main/resources/metastore-log4j2.properties  |     71 +
 .../src/main/resources/metastore-site.xml       |     34 +
 .../src/main/resources/package.jdo              |   1420 +
 .../src/main/resources/saveVersion.sh           |     91 +
 .../src/main/resources/thrift-replacements.txt  |    106 +
 .../metastore-common/src/main/scripts/base      |    231 +
 .../src/main/scripts/ext/metastore.sh           |     41 +
 .../src/main/scripts/ext/schemaTool.sh          |     33 +
 .../src/main/scripts/ext/smokeTest.sh           |     33 +
 .../src/main/scripts/metastore-config.sh        |     69 +
 .../src/main/scripts/schematool                 |     21 +
 .../src/main/scripts/start-metastore            |     22 +
 .../main/sql/derby/hive-schema-1.2.0.derby.sql  |    405 +
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |    692 +
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |    710 +
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |    710 +
 .../sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql  |     62 +
 .../sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql  |     22 +
 .../sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql  |     59 +
 .../sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql  |      5 +
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |    283 +
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |     49 +
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |      6 +
 .../src/main/sql/derby/upgrade.order.derby      |     18 +
 .../src/main/sql/mssql/create-user.mssql.sql    |      5 +
 .../main/sql/mssql/hive-schema-1.2.0.mssql.sql  |    947 +
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |   1246 +
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |   1271 +
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |   1272 +
 .../sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql  |     73 +
 .../sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |     39 +
 .../sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql  |     43 +
 .../sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql  |      7 +
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |    352 +
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |     51 +
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |      6 +
 .../src/main/sql/mssql/upgrade.order.mssql      |     12 +
 .../src/main/sql/mysql/create-user.mysql.sql    |      8 +
 .../main/sql/mysql/hive-schema-1.2.0.mysql.sql  |    910 +
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  |   1183 +
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |   1208 +
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |   1208 +
 .../sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql  |     75 +
 .../sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |     42 +
 .../sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql  |     43 +
 .../sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql  |      8 +
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |    326 +
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |     51 +
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |      6 +
 .../src/main/sql/mysql/upgrade.order.mysql      |     18 +
 .../src/main/sql/oracle/create-user.oracle.sql  |      3 +
 .../sql/oracle/hive-schema-1.2.0.oracle.sql     |    856 +
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     |   1140 +
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |   1165 +
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |   1165 +
 .../oracle/upgrade-1.2.0-to-2.0.0.oracle.sql    |     83 +
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql    |     39 +
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |     58 +
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |      7 +
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |    342 +
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |     51 +
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |      6 +
 .../src/main/sql/oracle/upgrade.order.oracle    |     14 +
 .../main/sql/postgres/create-user.postgres.sql  |      2 +
 .../sql/postgres/hive-schema-1.2.0.postgres.sql |   1562 +
 .../sql/postgres/hive-schema-3.0.0.postgres.sql |   1827 +
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |   1856 +
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |   1858 +
 .../upgrade-1.2.0-to-2.0.0.postgres.sql         |     73 +
 .../upgrade-2.0.0-to-2.1.0.postgres.sql         |     40 +
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |     39 +
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |      8 +
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |    360 +
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |     53 +
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |      6 +
 .../main/sql/postgres/upgrade.order.postgres    |     18 +
 .../src/main/thrift/hive_metastore.thrift       |   2275 +
 .../hadoop/hive/common/TestStatsSetupConst.java |    114 +
 .../ndv/fm/TestFMSketchSerialization.java       |    101 +
 .../hive/common/ndv/hll/TestHLLNoBias.java      |    117 +
 .../common/ndv/hll/TestHLLSerialization.java    |    270 +
 .../hive/common/ndv/hll/TestHyperLogLog.java    |    338 +
 .../common/ndv/hll/TestHyperLogLogDense.java    |     85 +
 .../common/ndv/hll/TestHyperLogLogMerge.java    |    147 +
 .../common/ndv/hll/TestHyperLogLogSparse.java   |     84 +
 .../common/ndv/hll/TestSparseEncodeHash.java    |     59 +
 .../metastore/AlternateFailurePreListener.java  |     62 +
 .../metastore/DummyEndFunctionListener.java     |     47 +
 .../metastore/DummyJdoConnectionUrlHook.java    |     45 +
 .../hadoop/hive/metastore/DummyListener.java    |    126 +
 .../metastore/DummyMetaStoreInitListener.java   |     39 +
 .../hadoop/hive/metastore/DummyPreListener.java |     49 +
 .../DummyRawStoreControlledCommit.java          |   1226 +
 .../DummyRawStoreForJdoConnection.java          |   1212 +
 .../apache/hadoop/hive/metastore/FakeDerby.java |    404 +
 .../HiveMetaStoreClientPreCatalog.java          |   3427 +
 .../InjectableBehaviourObjectStore.java         |    211 +
 .../hive/metastore/IpAddressListener.java       |    102 +
 .../hive/metastore/MetaStoreTestUtils.java      |    291 +
 .../MockPartitionExpressionForMetastore.java    |     58 +
 .../hive/metastore/NonCatCallsWithCatalog.java  |   1158 +
 .../hadoop/hive/metastore/TestAdminUser.java    |     49 +
 .../hive/metastore/TestAggregateStatsCache.java |    272 +
 .../metastore/TestCatalogNonDefaultClient.java  |     74 +
 .../metastore/TestCatalogNonDefaultSvr.java     |     68 +
 .../hive/metastore/TestCatalogOldClient.java    |     44 +
 .../hadoop/hive/metastore/TestDeadline.java     |    130 +
 .../metastore/TestEmbeddedHiveMetaStore.java    |     51 +
 .../hadoop/hive/metastore/TestFilterHooks.java  |    254 +
 .../hive/metastore/TestHiveAlterHandler.java    |    121 +
 .../hive/metastore/TestHiveMetaStore.java       |   3103 +
 .../metastore/TestHiveMetaStoreGetMetaConf.java |    115 +
 .../TestHiveMetaStorePartitionSpecs.java        |    383 +
 .../TestHiveMetaStoreSchemaMethods.java         |   1248 +
 .../metastore/TestHiveMetaStoreTimeout.java     |    142 +
 .../hive/metastore/TestHiveMetaStoreTxns.java   |    267 +
 ...TestHiveMetaStoreWithEnvironmentContext.java |    191 +
 .../hive/metastore/TestHiveMetastoreCli.java    |     68 +
 .../hive/metastore/TestLockRequestBuilder.java  |    587 +
 .../hive/metastore/TestMarkPartition.java       |    118 +
 .../hive/metastore/TestMarkPartitionRemote.java |     34 +
 .../TestMetaStoreConnectionUrlHook.java         |     49 +
 .../TestMetaStoreEndFunctionListener.java       |    146 +
 .../metastore/TestMetaStoreEventListener.java   |    471 +
 .../TestMetaStoreEventListenerOnlyOnCommit.java |    121 +
 .../TestMetaStoreEventListenerWithOldConf.java  |    129 +
 .../metastore/TestMetaStoreInitListener.java    |     56 +
 .../metastore/TestMetaStoreListenersError.java  |     97 +
 ...stMetaStoreMaterializationsCacheCleaner.java |    328 +
 .../metastore/TestMetaStoreSchemaFactory.java   |     72 +
 .../hive/metastore/TestMetaStoreSchemaInfo.java |     55 +
 .../hadoop/hive/metastore/TestObjectStore.java  |    904 +
 .../metastore/TestObjectStoreInitRetry.java     |    135 +
 .../metastore/TestObjectStoreSchemaMethods.java |    602 +
 .../hadoop/hive/metastore/TestOldSchema.java    |    233 +
 .../TestPartitionNameWhitelistValidation.java   |    125 +
 .../hive/metastore/TestRawStoreProxy.java       |     67 +
 .../hive/metastore/TestRemoteHiveMetaStore.java |     64 +
 .../TestRemoteHiveMetaStoreIpAddress.java       |     66 +
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |     31 +
 .../TestRetriesInRetryingHMSHandler.java        |    111 +
 .../hive/metastore/TestRetryingHMSHandler.java  |     82 +
 .../metastore/TestSetUGIOnBothClientServer.java |     34 +
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |     35 +
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |     35 +
 .../apache/hadoop/hive/metastore/TestStats.java |    732 +
 .../hive/metastore/VerifyingObjectStore.java    |    219 +
 .../annotation/MetastoreCheckinTest.java        |     25 +
 .../metastore/annotation/MetastoreTest.java     |     24 +
 .../metastore/annotation/MetastoreUnitTest.java |     25 +
 .../hive/metastore/cache/TestCachedStore.java   |   1075 +
 .../metastore/cache/TestCatalogCaching.java     |    142 +
 .../metastore/client/MetaStoreClientTest.java   |     95 +
 .../client/MetaStoreFactoryForTests.java        |    112 +
 .../metastore/client/TestAddPartitions.java     |   1736 +
 .../client/TestAddPartitionsFromPartSpec.java   |   1267 +
 .../metastore/client/TestAlterPartitions.java   |   1117 +
 .../metastore/client/TestAppendPartitions.java  |    594 +
 .../hive/metastore/client/TestCatalogs.java     |    267 +
 .../metastore/client/TestCheckConstraint.java   |    363 +
 .../hive/metastore/client/TestDatabases.java    |    634 +
 .../metastore/client/TestDefaultConstraint.java |    363 +
 .../metastore/client/TestDropPartitions.java    |    659 +
 .../client/TestExchangePartitions.java          |   1337 +
 .../hive/metastore/client/TestForeignKey.java   |    538 +
 .../hive/metastore/client/TestFunctions.java    |    765 +
 .../metastore/client/TestGetPartitions.java     |    608 +
 .../hive/metastore/client/TestGetTableMeta.java |    330 +
 .../metastore/client/TestListPartitions.java    |   1522 +
 .../metastore/client/TestNotNullConstraint.java |    355 +
 .../hive/metastore/client/TestPrimaryKey.java   |    468 +
 .../hive/metastore/client/TestRuntimeStats.java |    154 +
 .../TestTablesCreateDropAlterTruncate.java      |   1384 +
 .../metastore/client/TestTablesGetExists.java   |    514 +
 .../hive/metastore/client/TestTablesList.java   |    320 +
 .../metastore/client/TestUniqueConstraint.java  |    356 +
 .../hive/metastore/client/package-info.java     |     22 +
 .../merge/DecimalColumnStatsMergerTest.java     |    235 +
 .../hive/metastore/conf/TestMetastoreConf.java  |    433 +
 .../TestDataSourceProviderFactory.java          |    248 +
 .../hive/metastore/dbinstall/DbInstallBase.java |    265 +
 .../hive/metastore/dbinstall/ITestMysql.java    |     82 +
 .../hive/metastore/dbinstall/ITestOracle.java   |     83 +
 .../hive/metastore/dbinstall/ITestPostgres.java |     82 +
 .../metastore/dbinstall/ITestSqlServer.java     |     84 +
 .../json/TestJSONMessageDeserializer.java       |    115 +
 .../hive/metastore/metrics/TestMetrics.java     |    164 +
 .../minihms/AbstractMetaStoreService.java       |    173 +
 .../minihms/ClusterMetaStoreForTests.java       |     32 +
 .../minihms/EmbeddedMetaStoreForTests.java      |     33 +
 .../hadoop/hive/metastore/minihms/MiniHMS.java  |     76 +
 .../minihms/RemoteMetaStoreForTests.java        |     43 +
 .../hive/metastore/minihms/package-info.java    |     23 +
 .../tools/TestMetastoreSchemaTool.java          |     70 +
 .../tools/TestSchemaToolForMetastore.java       |    534 +
 .../metastore/txn/TestTxnHandlerNegative.java   |     58 +
 .../hadoop/hive/metastore/txn/TestTxnUtils.java |    239 +
 .../hive/metastore/utils/TestHdfsUtils.java     |    348 +
 .../metastore/utils/TestMetaStoreUtils.java     |    291 +
 .../src/test/resources/log4j2.properties        |     35 +
 standalone-metastore/pom.xml                    |   1000 +-
 standalone-metastore/src/assembly/bin.xml       |    136 -
 standalone-metastore/src/assembly/src.xml       |     53 -
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.cpp  |  94840 ------
 .../gen/thrift/gen-cpp/ThriftHiveMetastore.h    |  30068 --
 .../ThriftHiveMetastore_server.skeleton.cpp     |   1079 -
 .../thrift/gen-cpp/hive_metastore_constants.cpp |     67 -
 .../thrift/gen-cpp/hive_metastore_constants.h   |     49 -
 .../gen/thrift/gen-cpp/hive_metastore_types.cpp |  33217 ---
 .../gen/thrift/gen-cpp/hive_metastore_types.h   |  14125 -
 .../hive/metastore/api/AbortTxnRequest.java     |    497 -
 .../hive/metastore/api/AbortTxnsRequest.java    |    438 -
 .../api/AddCheckConstraintRequest.java          |    443 -
 .../api/AddDefaultConstraintRequest.java        |    443 -
 .../metastore/api/AddDynamicPartitions.java     |    959 -
 .../metastore/api/AddForeignKeyRequest.java     |    443 -
 .../api/AddNotNullConstraintRequest.java        |    443 -
 .../metastore/api/AddPartitionsRequest.java     |    955 -
 .../hive/metastore/api/AddPartitionsResult.java |    447 -
 .../metastore/api/AddPrimaryKeyRequest.java     |    443 -
 .../api/AddUniqueConstraintRequest.java         |    443 -
 .../hadoop/hive/metastore/api/AggrStats.java    |    542 -
 .../api/AllocateTableWriteIdsRequest.java       |    915 -
 .../api/AllocateTableWriteIdsResponse.java      |    443 -
 .../metastore/api/AlreadyExistsException.java   |    395 -
 .../hive/metastore/api/AlterCatalogRequest.java |    504 -
 .../hive/metastore/api/AlterISchemaRequest.java |    509 -
 .../hadoop/hive/metastore/api/BasicTxnInfo.java |    907 -
 .../metastore/api/BinaryColumnStatsData.java    |    696 -
 .../metastore/api/BooleanColumnStatsData.java   |    696 -
 .../metastore/api/CacheFileMetadataRequest.java |    703 -
 .../metastore/api/CacheFileMetadataResult.java  |    387 -
 .../hadoop/hive/metastore/api/Catalog.java      |    606 -
 .../metastore/api/CheckConstraintsRequest.java  |    591 -
 .../metastore/api/CheckConstraintsResponse.java |    443 -
 .../hive/metastore/api/CheckLockRequest.java    |    589 -
 .../metastore/api/ClearFileMetadataRequest.java |    438 -
 .../metastore/api/ClearFileMetadataResult.java  |    283 -
 .../hive/metastore/api/ClientCapabilities.java  |    441 -
 .../hive/metastore/api/ClientCapability.java    |     45 -
 .../hive/metastore/api/CmRecycleRequest.java    |    488 -
 .../hive/metastore/api/CmRecycleResponse.java   |    283 -
 .../hive/metastore/api/ColumnStatistics.java    |    549 -
 .../metastore/api/ColumnStatisticsData.java     |    675 -
 .../metastore/api/ColumnStatisticsDesc.java     |    904 -
 .../hive/metastore/api/ColumnStatisticsObj.java |    593 -
 .../hive/metastore/api/CommitTxnRequest.java    |    657 -
 .../hive/metastore/api/CompactionRequest.java   |    977 -
 .../hive/metastore/api/CompactionResponse.java  |    583 -
 .../hive/metastore/api/CompactionType.java      |     45 -
 .../api/ConfigValSecurityException.java         |    395 -
 .../metastore/api/CreateCatalogRequest.java     |    400 -
 .../hive/metastore/api/CreationMetadata.java    |    851 -
 .../api/CurrentNotificationEventId.java         |    387 -
 .../hive/metastore/api/DataOperationType.java   |     57 -
 .../hadoop/hive/metastore/api/Database.java     |   1201 -
 .../apache/hadoop/hive/metastore/api/Date.java  |    387 -
 .../hive/metastore/api/DateColumnStatsData.java |    823 -
 .../hadoop/hive/metastore/api/Decimal.java      |    497 -
 .../metastore/api/DecimalColumnStatsData.java   |    823 -
 .../api/DefaultConstraintsRequest.java          |    591 -
 .../api/DefaultConstraintsResponse.java         |    443 -
 .../metastore/api/DoubleColumnStatsData.java    |    799 -
 .../hive/metastore/api/DropCatalogRequest.java  |    395 -
 .../metastore/api/DropConstraintRequest.java    |    701 -
 .../hive/metastore/api/DropPartitionsExpr.java  |    505 -
 .../metastore/api/DropPartitionsRequest.java    |   1218 -
 .../metastore/api/DropPartitionsResult.java     |    447 -
 .../hive/metastore/api/EnvironmentContext.java  |    447 -
 .../hive/metastore/api/EventRequestType.java    |     48 -
 .../hadoop/hive/metastore/api/FieldSchema.java  |    603 -
 .../metastore/api/FileMetadataExprType.java     |     42 -
 .../metastore/api/FindSchemasByColsResp.java    |    449 -
 .../metastore/api/FindSchemasByColsRqst.java    |    605 -
 .../hive/metastore/api/FireEventRequest.java    |    967 -
 .../metastore/api/FireEventRequestData.java     |    309 -
 .../hive/metastore/api/FireEventResponse.java   |    283 -
 .../hive/metastore/api/ForeignKeysRequest.java  |    814 -
 .../hive/metastore/api/ForeignKeysResponse.java |    443 -
 .../hadoop/hive/metastore/api/Function.java     |   1306 -
 .../hadoop/hive/metastore/api/FunctionType.java |     42 -
 .../metastore/api/GetAllFunctionsResponse.java  |    447 -
 .../hive/metastore/api/GetCatalogRequest.java   |    395 -
 .../hive/metastore/api/GetCatalogResponse.java  |    400 -
 .../hive/metastore/api/GetCatalogsResponse.java |    444 -
 .../api/GetFileMetadataByExprRequest.java       |    773 -
 .../api/GetFileMetadataByExprResult.java        |    553 -
 .../metastore/api/GetFileMetadataRequest.java   |    438 -
 .../metastore/api/GetFileMetadataResult.java    |    540 -
 .../metastore/api/GetOpenTxnsInfoResponse.java  |    542 -
 .../hive/metastore/api/GetOpenTxnsResponse.java |    750 -
 .../api/GetPrincipalsInRoleRequest.java         |    389 -
 .../api/GetPrincipalsInRoleResponse.java        |    443 -
 .../api/GetRoleGrantsForPrincipalRequest.java   |    502 -
 .../api/GetRoleGrantsForPrincipalResponse.java  |    443 -
 .../metastore/api/GetRuntimeStatsRequest.java   |    482 -
 .../hive/metastore/api/GetSerdeRequest.java     |    395 -
 .../hive/metastore/api/GetTableRequest.java     |    711 -
 .../hive/metastore/api/GetTableResult.java      |    394 -
 .../hive/metastore/api/GetTablesRequest.java    |    765 -
 .../hive/metastore/api/GetTablesResult.java     |    443 -
 .../metastore/api/GetValidWriteIdsRequest.java  |    539 -
 .../metastore/api/GetValidWriteIdsResponse.java |    443 -
 .../api/GrantRevokePrivilegeRequest.java        |    620 -
 .../api/GrantRevokePrivilegeResponse.java       |    390 -
 .../metastore/api/GrantRevokeRoleRequest.java   |   1059 -
 .../metastore/api/GrantRevokeRoleResponse.java  |    390 -
 .../hive/metastore/api/GrantRevokeType.java     |     45 -
 .../hive/metastore/api/HeartbeatRequest.java    |    489 -
 .../metastore/api/HeartbeatTxnRangeRequest.java |    482 -
 .../api/HeartbeatTxnRangeResponse.java          |    588 -
 .../hive/metastore/api/HiveObjectPrivilege.java |    833 -
 .../hive/metastore/api/HiveObjectRef.java       |    979 -
 .../hive/metastore/api/HiveObjectType.java      |     54 -
 .../hadoop/hive/metastore/api/ISchema.java      |   1266 -
 .../hadoop/hive/metastore/api/ISchemaName.java  |    603 -
 .../metastore/api/InsertEventRequestData.java   |    855 -
 .../metastore/api/InvalidInputException.java    |    395 -
 .../metastore/api/InvalidObjectException.java   |    395 -
 .../api/InvalidOperationException.java          |    395 -
 .../api/InvalidPartitionException.java          |    395 -
 .../hive/metastore/api/LockComponent.java       |   1158 -
 .../hadoop/hive/metastore/api/LockLevel.java    |     48 -
 .../hadoop/hive/metastore/api/LockRequest.java  |    861 -
 .../hadoop/hive/metastore/api/LockResponse.java |    500 -
 .../hadoop/hive/metastore/api/LockState.java    |     51 -
 .../hadoop/hive/metastore/api/LockType.java     |     48 -
 .../hive/metastore/api/LongColumnStatsData.java |    799 -
 .../api/MapSchemaVersionToSerdeRequest.java     |    504 -
 .../hive/metastore/api/Materialization.java     |    750 -
 .../hive/metastore/api/MetaException.java       |    395 -
 .../hive/metastore/api/MetadataPpdResult.java   |    517 -
 .../hive/metastore/api/NoSuchLockException.java |    395 -
 .../metastore/api/NoSuchObjectException.java    |    395 -
 .../hive/metastore/api/NoSuchTxnException.java  |    395 -
 .../api/NotNullConstraintsRequest.java          |    591 -
 .../api/NotNullConstraintsResponse.java         |    443 -
 .../hive/metastore/api/NotificationEvent.java   |   1112 -
 .../metastore/api/NotificationEventRequest.java |    490 -
 .../api/NotificationEventResponse.java          |    443 -
 .../api/NotificationEventsCountRequest.java     |    598 -
 .../api/NotificationEventsCountResponse.java    |    387 -
 .../hive/metastore/api/OpenTxnRequest.java      |    963 -
 .../hive/metastore/api/OpenTxnsResponse.java    |    438 -
 .../apache/hadoop/hive/metastore/api/Order.java |    497 -
 .../hadoop/hive/metastore/api/Partition.java    |   1335 -
 .../hive/metastore/api/PartitionEventType.java  |     42 -
 .../api/PartitionListComposingSpec.java         |    449 -
 .../hive/metastore/api/PartitionSpec.java       |    932 -
 .../api/PartitionSpecWithSharedSD.java          |    558 -
 .../metastore/api/PartitionValuesRequest.java   |   1328 -
 .../metastore/api/PartitionValuesResponse.java  |    443 -
 .../hive/metastore/api/PartitionValuesRow.java  |    438 -
 .../hive/metastore/api/PartitionWithoutSD.java  |   1016 -
 .../metastore/api/PartitionsByExprRequest.java  |    921 -
 .../metastore/api/PartitionsByExprResult.java   |    542 -
 .../metastore/api/PartitionsStatsRequest.java   |    900 -
 .../metastore/api/PartitionsStatsResult.java    |    490 -
 .../hive/metastore/api/PrimaryKeysRequest.java  |    600 -
 .../hive/metastore/api/PrimaryKeysResponse.java |    443 -
 .../metastore/api/PrincipalPrivilegeSet.java    |    906 -
 .../hive/metastore/api/PrincipalType.java       |     48 -
 .../hadoop/hive/metastore/api/PrivilegeBag.java |    449 -
 .../hive/metastore/api/PrivilegeGrantInfo.java  |    815 -
 .../metastore/api/PutFileMetadataRequest.java   |    710 -
 .../metastore/api/PutFileMetadataResult.java    |    283 -
 .../api/ReplTblWriteIdStateRequest.java         |    952 -
 .../hive/metastore/api/RequestPartsSpec.java    |    438 -
 .../hadoop/hive/metastore/api/ResourceType.java |     48 -
 .../hadoop/hive/metastore/api/ResourceUri.java  |    511 -
 .../apache/hadoop/hive/metastore/api/Role.java  |    601 -
 .../hive/metastore/api/RolePrincipalGrant.java  |   1035 -
 .../hadoop/hive/metastore/api/RuntimeStat.java  |    600 -
 .../hive/metastore/api/SQLCheckConstraint.java  |   1213 -
 .../metastore/api/SQLDefaultConstraint.java     |   1213 -
 .../hive/metastore/api/SQLForeignKey.java       |   1822 -
 .../metastore/api/SQLNotNullConstraint.java     |   1109 -
 .../hive/metastore/api/SQLPrimaryKey.java       |   1210 -
 .../hive/metastore/api/SQLUniqueConstraint.java |   1207 -
 .../hadoop/hive/metastore/api/Schema.java       |    605 -
 .../hive/metastore/api/SchemaCompatibility.java |     51 -
 .../hadoop/hive/metastore/api/SchemaType.java   |     45 -
 .../hive/metastore/api/SchemaValidation.java    |     45 -
 .../hive/metastore/api/SchemaVersion.java       |   1412 -
 .../metastore/api/SchemaVersionDescriptor.java  |    502 -
 .../hive/metastore/api/SchemaVersionState.java  |     63 -
 .../hadoop/hive/metastore/api/SerDeInfo.java    |   1092 -
 .../hadoop/hive/metastore/api/SerdeType.java    |     45 -
 .../api/SetPartitionsStatsRequest.java          |    550 -
 .../api/SetSchemaVersionStateRequest.java       |    516 -
 .../hive/metastore/api/ShowCompactRequest.java  |    283 -
 .../hive/metastore/api/ShowCompactResponse.java |    443 -
 .../api/ShowCompactResponseElement.java         |   1641 -
 .../hive/metastore/api/ShowLocksRequest.java    |    710 -
 .../hive/metastore/api/ShowLocksResponse.java   |    449 -
 .../metastore/api/ShowLocksResponseElement.java |   1929 -
 .../hadoop/hive/metastore/api/SkewedInfo.java   |    834 -
 .../hive/metastore/api/StorageDescriptor.java   |   1748 -
 .../metastore/api/StringColumnStatsData.java    |    791 -
 .../apache/hadoop/hive/metastore/api/Table.java |   2283 -
 .../hadoop/hive/metastore/api/TableMeta.java    |    807 -
 .../hive/metastore/api/TableStatsRequest.java   |    750 -
 .../hive/metastore/api/TableStatsResult.java    |    443 -
 .../hive/metastore/api/TableValidWriteIds.java  |    851 -
 .../hive/metastore/api/ThriftHiveMetastore.java | 239288 ----------------
 .../hive/metastore/api/TxnAbortedException.java |    395 -
 .../hadoop/hive/metastore/api/TxnInfo.java      |   1220 -
 .../hive/metastore/api/TxnOpenException.java    |    395 -
 .../hadoop/hive/metastore/api/TxnState.java     |     48 -
 .../hadoop/hive/metastore/api/TxnToWriteId.java |    482 -
 .../apache/hadoop/hive/metastore/api/Type.java  |    768 -
 .../metastore/api/UniqueConstraintsRequest.java |    591 -
 .../api/UniqueConstraintsResponse.java          |    443 -
 .../hive/metastore/api/UnknownDBException.java  |    395 -
 .../api/UnknownPartitionException.java          |    395 -
 .../metastore/api/UnknownTableException.java    |    395 -
 .../hive/metastore/api/UnlockRequest.java       |    387 -
 .../hadoop/hive/metastore/api/Version.java      |    499 -
 .../hive/metastore/api/WMAlterPoolRequest.java  |    504 -
 .../hive/metastore/api/WMAlterPoolResponse.java |    283 -
 .../api/WMAlterResourcePlanRequest.java         |    805 -
 .../api/WMAlterResourcePlanResponse.java        |    398 -
 .../metastore/api/WMAlterTriggerRequest.java    |    398 -
 .../metastore/api/WMAlterTriggerResponse.java   |    283 -
 ...CreateOrDropTriggerToPoolMappingRequest.java |    708 -
 ...reateOrDropTriggerToPoolMappingResponse.java |    283 -
 .../api/WMCreateOrUpdateMappingRequest.java     |    501 -
 .../api/WMCreateOrUpdateMappingResponse.java    |    283 -
 .../hive/metastore/api/WMCreatePoolRequest.java |    398 -
 .../metastore/api/WMCreatePoolResponse.java     |    283 -
 .../api/WMCreateResourcePlanRequest.java        |    504 -
 .../api/WMCreateResourcePlanResponse.java       |    283 -
 .../metastore/api/WMCreateTriggerRequest.java   |    398 -
 .../metastore/api/WMCreateTriggerResponse.java  |    283 -
 .../metastore/api/WMDropMappingRequest.java     |    398 -
 .../metastore/api/WMDropMappingResponse.java    |    283 -
 .../hive/metastore/api/WMDropPoolRequest.java   |    499 -
 .../hive/metastore/api/WMDropPoolResponse.java  |    283 -
 .../api/WMDropResourcePlanRequest.java          |    393 -
 .../api/WMDropResourcePlanResponse.java         |    283 -
 .../metastore/api/WMDropTriggerRequest.java     |    499 -
 .../metastore/api/WMDropTriggerResponse.java    |    283 -
 .../hive/metastore/api/WMFullResourcePlan.java  |   1033 -
 .../api/WMGetActiveResourcePlanRequest.java     |    283 -
 .../api/WMGetActiveResourcePlanResponse.java    |    398 -
 .../api/WMGetAllResourcePlanRequest.java        |    283 -
 .../api/WMGetAllResourcePlanResponse.java       |    447 -
 .../metastore/api/WMGetResourcePlanRequest.java |    393 -
 .../api/WMGetResourcePlanResponse.java          |    398 -
 .../api/WMGetTriggersForResourePlanRequest.java |    393 -
 .../WMGetTriggersForResourePlanResponse.java    |    447 -
 .../hadoop/hive/metastore/api/WMMapping.java    |    804 -
 .../hive/metastore/api/WMNullablePool.java      |    901 -
 .../metastore/api/WMNullableResourcePlan.java   |    918 -
 .../hadoop/hive/metastore/api/WMPool.java       |    802 -
 .../metastore/api/WMPoolSchedulingPolicy.java   |     45 -
 .../hive/metastore/api/WMPoolTrigger.java       |    490 -
 .../hive/metastore/api/WMResourcePlan.java      |    720 -
 .../metastore/api/WMResourcePlanStatus.java     |     48 -
 .../hadoop/hive/metastore/api/WMTrigger.java    |    809 -
 .../api/WMValidateResourcePlanRequest.java      |    393 -
 .../api/WMValidateResourcePlanResponse.java     |    597 -
 .../hive/metastore/api/WriteEventInfo.java      |   1012 -
 .../api/WriteNotificationLogRequest.java        |    949 -
 .../api/WriteNotificationLogResponse.java       |    283 -
 .../metastore/api/hive_metastoreConstants.java  |     89 -
 .../gen-php/metastore/ThriftHiveMetastore.php   |  59951 ----
 .../src/gen/thrift/gen-php/metastore/Types.php  |  32146 ---
 .../src/gen/thrift/gen-py/__init__.py           |      0
 .../hive_metastore/ThriftHiveMetastore-remote   |   1634 -
 .../hive_metastore/ThriftHiveMetastore.py       |  48956 ----
 .../thrift/gen-py/hive_metastore/__init__.py    |      1 -
 .../thrift/gen-py/hive_metastore/constants.py   |     36 -
 .../gen/thrift/gen-py/hive_metastore/ttypes.py  |  23076 --
 .../thrift/gen-rb/hive_metastore_constants.rb   |     59 -
 .../gen/thrift/gen-rb/hive_metastore_types.rb   |   5322 -
 .../gen/thrift/gen-rb/thrift_hive_metastore.rb  |  13779 -
 .../hadoop/hive/common/StatsSetupConst.java     |    335 -
 .../common/classification/RetrySemantics.java   |     57 -
 .../common/ndv/NumDistinctValueEstimator.java   |     51 -
 .../ndv/NumDistinctValueEstimatorFactory.java   |     75 -
 .../hadoop/hive/common/ndv/fm/FMSketch.java     |    359 -
 .../hive/common/ndv/fm/FMSketchUtils.java       |    132 -
 .../hive/common/ndv/hll/HLLConstants.java       |    933 -
 .../hive/common/ndv/hll/HLLDenseRegister.java   |    202 -
 .../hadoop/hive/common/ndv/hll/HLLRegister.java |     50 -
 .../hive/common/ndv/hll/HLLSparseRegister.java  |    261 -
 .../hadoop/hive/common/ndv/hll/HyperLogLog.java |    664 -
 .../hive/common/ndv/hll/HyperLogLogUtils.java   |    409 -
 .../hive/metastore/AcidEventListener.java       |    146 -
 .../hive/metastore/AggregateStatsCache.java     |    571 -
 .../hadoop/hive/metastore/AlterHandler.java     |    202 -
 .../apache/hadoop/hive/metastore/Batchable.java |     86 -
 .../hadoop/hive/metastore/ColumnType.java       |    301 -
 .../hadoop/hive/metastore/DatabaseProduct.java  |     75 -
 .../apache/hadoop/hive/metastore/Deadline.java  |    172 -
 .../hive/metastore/DeadlineException.java       |     29 -
 .../hive/metastore/DefaultHiveMetaHook.java     |     51 -
 .../DefaultMetaStoreFilterHookImpl.java         |     93 -
 .../DefaultPartitionExpressionProxy.java        |     57 -
 .../metastore/DefaultStorageSchemaReader.java   |     38 -
 .../hadoop/hive/metastore/FileFormatProxy.java  |     64 -
 .../hive/metastore/FileMetadataHandler.java     |    109 -
 .../hive/metastore/FileMetadataManager.java     |    119 -
 .../hive/metastore/HMSMetricsListener.java      |     90 -
 .../hadoop/hive/metastore/HiveAlterHandler.java |    948 -
 .../hive/metastore/HiveMetaException.java       |     42 -
 .../hadoop/hive/metastore/HiveMetaHook.java     |    122 -
 .../hive/metastore/HiveMetaHookLoader.java      |     39 -
 .../hadoop/hive/metastore/HiveMetaStore.java    |   9354 -
 .../hive/metastore/HiveMetaStoreClient.java     |   3326 -
 .../hive/metastore/HiveMetaStoreFsImpl.java     |     55 -
 .../hive/metastore/IExtrapolatePartStatus.java  |     85 -
 .../hadoop/hive/metastore/IHMSHandler.java      |    104 -
 .../hadoop/hive/metastore/IMetaStoreClient.java |   3699 -
 .../hive/metastore/IMetaStoreSchemaInfo.java    |    115 -
 .../metastore/LinearExtrapolatePartStatus.java  |    106 -
 .../hive/metastore/LockComponentBuilder.java    |    121 -
 .../hive/metastore/LockRequestBuilder.java      |    168 -
 .../MaterializationsCacheCleanerTask.java       |     63 -
 .../MaterializationsInvalidationCache.java      |    543 -
 .../MaterializationsRebuildLockCleanerTask.java |     61 -
 .../MaterializationsRebuildLockHandler.java     |    216 -
 .../hive/metastore/MetaStoreDirectSql.java      |   2817 -
 .../metastore/MetaStoreEndFunctionContext.java  |     59 -
 .../metastore/MetaStoreEndFunctionListener.java |     58 -
 .../hive/metastore/MetaStoreEventListener.java  |    306 -
 .../MetaStoreEventListenerConstants.java        |     41 -
 .../hadoop/hive/metastore/MetaStoreFS.java      |     43 -
 .../hive/metastore/MetaStoreFilterHook.java     |    147 -
 .../hadoop/hive/metastore/MetaStoreInit.java    |    109 -
 .../hive/metastore/MetaStoreInitContext.java    |     27 -
 .../hive/metastore/MetaStoreInitListener.java   |     49 -
 .../metastore/MetaStoreListenerNotifier.java    |    375 -
 .../metastore/MetaStorePreEventListener.java    |     57 -
 .../hive/metastore/MetaStoreSchemaInfo.java     |    246 -
 .../metastore/MetaStoreSchemaInfoFactory.java   |     64 -
 .../hadoop/hive/metastore/MetaStoreThread.java  |     58 -
 .../hadoop/hive/metastore/MetadataStore.java    |     52 -
 .../hive/metastore/MetastoreTaskThread.java     |     38 -
 .../hadoop/hive/metastore/ObjectStore.java      |  12219 -
 .../hive/metastore/PartFilterExprUtil.java      |    165 -
 .../hive/metastore/PartitionDropOptions.java    |     54 -
 .../metastore/PartitionExpressionProxy.java     |     73 -
 .../apache/hadoop/hive/metastore/RawStore.java  |   1682 -
 .../hadoop/hive/metastore/RawStoreProxy.java    |    114 -
 .../hive/metastore/ReplChangeManager.java       |    501 -
 .../hive/metastore/RetryingHMSHandler.java      |    232 -
 .../hive/metastore/RetryingMetaStoreClient.java |    341 -
 .../hive/metastore/RuntimeStatsCleanerTask.java |     66 -
 .../metastore/SessionPropertiesListener.java    |     46 -
 .../hive/metastore/StatObjectConverter.java     |    892 -
 .../hive/metastore/StorageSchemaReader.java     |     46 -
 .../hive/metastore/TServerSocketKeepAlive.java  |     47 -
 .../hive/metastore/TSetIpAddressProcessor.java  |     62 -
 .../hive/metastore/TUGIBasedProcessor.java      |    183 -
 .../apache/hadoop/hive/metastore/TableType.java |     26 -
 .../hadoop/hive/metastore/ThreadPool.java       |     63 -
 .../TransactionalMetaStoreEventListener.java    |     39 -
 .../TransactionalValidationListener.java        |    487 -
 .../apache/hadoop/hive/metastore/Warehouse.java |    756 -
 .../annotation/MetastoreVersionAnnotation.java  |     85 -
 .../hive/metastore/annotation/NoReconnect.java  |     29 -
 .../api/InitializeTableWriteIdsRequest.java     |     42 -
 .../hive/metastore/api/utils/DecimalUtils.java  |     49 -
 .../hive/metastore/cache/ByteArrayWrapper.java  |     45 -
 .../hadoop/hive/metastore/cache/CacheUtils.java |    136 -
 .../hive/metastore/cache/CachedStore.java       |   2532 -
 .../hive/metastore/cache/SharedCache.java       |   1650 -
 .../client/builder/CatalogBuilder.java          |     62 -
 .../client/builder/ConstraintBuilder.java       |    115 -
 .../client/builder/DatabaseBuilder.java         |    122 -
 .../client/builder/FunctionBuilder.java         |    143 -
 .../GrantRevokePrivilegeRequestBuilder.java     |     63 -
 .../builder/HiveObjectPrivilegeBuilder.java     |     69 -
 .../client/builder/HiveObjectRefBuilder.java    |     69 -
 .../client/builder/ISchemaBuilder.java          |    102 -
 .../client/builder/PartitionBuilder.java        |    119 -
 .../builder/PrivilegeGrantInfoBuilder.java      |     84 -
 .../metastore/client/builder/RoleBuilder.java   |     55 -
 .../builder/SQLCheckConstraintBuilder.java      |     51 -
 .../builder/SQLDefaultConstraintBuilder.java    |     51 -
 .../client/builder/SQLForeignKeyBuilder.java    |    103 -
 .../builder/SQLNotNullConstraintBuilder.java    |     52 -
 .../client/builder/SQLPrimaryKeyBuilder.java    |     52 -
 .../builder/SQLUniqueConstraintBuilder.java     |     46 -
 .../client/builder/SchemaVersionBuilder.java    |    114 -
 .../client/builder/SerdeAndColsBuilder.java     |    124 -
 .../builder/StorageDescriptorBuilder.java       |    163 -
 .../metastore/client/builder/TableBuilder.java  |    224 -
 .../aggr/BinaryColumnStatsAggregator.java       |     61 -
 .../aggr/BooleanColumnStatsAggregator.java      |     62 -
 .../columnstats/aggr/ColumnStatsAggregator.java |     35 -
 .../aggr/ColumnStatsAggregatorFactory.java      |    113 -
 .../aggr/DateColumnStatsAggregator.java         |    360 -
 .../aggr/DecimalColumnStatsAggregator.java      |    375 -
 .../aggr/DoubleColumnStatsAggregator.java       |    348 -
 .../aggr/IExtrapolatePartStatus.java            |     47 -
 .../aggr/LongColumnStatsAggregator.java         |    348 -
 .../aggr/StringColumnStatsAggregator.java       |    304 -
 .../cache/DateColumnStatsDataInspector.java     |    124 -
 .../cache/DecimalColumnStatsDataInspector.java  |    124 -
 .../cache/DoubleColumnStatsDataInspector.java   |    124 -
 .../cache/LongColumnStatsDataInspector.java     |    124 -
 .../cache/StringColumnStatsDataInspector.java   |    125 -
 .../merge/BinaryColumnStatsMerger.java          |     35 -
 .../merge/BooleanColumnStatsMerger.java         |     35 -
 .../columnstats/merge/ColumnStatsMerger.java    |     31 -
 .../merge/ColumnStatsMergerFactory.java         |    120 -
 .../merge/DateColumnStatsMerger.java            |     59 -
 .../merge/DecimalColumnStatsMerger.java         |     85 -
 .../merge/DoubleColumnStatsMerger.java          |     54 -
 .../merge/LongColumnStatsMerger.java            |     54 -
 .../merge/StringColumnStatsMerger.java          |     54 -
 .../metastore/conf/ConfTemplatePrinter.java     |    150 -
 .../hive/metastore/conf/EnumValidator.java      |     26 -
 .../hive/metastore/conf/MetastoreConf.java      |   1688 -
 .../hive/metastore/conf/RangeValidator.java     |     38 -
 .../hive/metastore/conf/SizeValidator.java      |    110 -
 .../hive/metastore/conf/StringSetValidator.java |     51 -
 .../hive/metastore/conf/TimeValidator.java      |     67 -
 .../hadoop/hive/metastore/conf/Validator.java   |     87 -
 .../datasource/BoneCPDataSourceProvider.java    |     87 -
 .../datasource/DataSourceProvider.java          |     79 -
 .../datasource/DataSourceProviderFactory.java   |     66 -
 .../datasource/DbCPDataSourceProvider.java      |    117 -
 .../datasource/HikariCPDataSourceProvider.java  |     89 -
 .../hive/metastore/datasource/package-info.java |     23 -
 .../hive/metastore/events/AbortTxnEvent.java    |     51 -
 .../hive/metastore/events/AcidWriteEvent.java   |     91 -
 .../metastore/events/AddForeignKeyEvent.java    |     41 -
 .../events/AddNotNullConstraintEvent.java       |     42 -
 .../metastore/events/AddPartitionEvent.java     |     84 -
 .../metastore/events/AddPrimaryKeyEvent.java    |     42 -
 .../metastore/events/AddSchemaVersionEvent.java |     40 -
 .../events/AddUniqueConstraintEvent.java        |     42 -
 .../metastore/events/AllocWriteIdEvent.java     |     57 -
 .../metastore/events/AlterCatalogEvent.java     |     44 -
 .../metastore/events/AlterDatabaseEvent.java    |     56 -
 .../metastore/events/AlterISchemaEvent.java     |     45 -
 .../metastore/events/AlterPartitionEvent.java   |     75 -
 .../events/AlterSchemaVersionEvent.java         |     46 -
 .../hive/metastore/events/AlterTableEvent.java  |     63 -
 .../hive/metastore/events/CommitTxnEvent.java   |     51 -
 .../metastore/events/ConfigChangeEvent.java     |     52 -
 .../metastore/events/CreateCatalogEvent.java    |     39 -
 .../metastore/events/CreateDatabaseEvent.java   |     43 -
 .../metastore/events/CreateFunctionEvent.java   |     43 -
 .../metastore/events/CreateISchemaEvent.java    |     39 -
 .../hive/metastore/events/CreateTableEvent.java |     43 -
 .../hive/metastore/events/DropCatalogEvent.java |     39 -
 .../metastore/events/DropConstraintEvent.java   |     57 -
 .../metastore/events/DropDatabaseEvent.java     |     43 -
 .../metastore/events/DropFunctionEvent.java     |     43 -
 .../hive/metastore/events/DropISchemaEvent.java |     39 -
 .../metastore/events/DropPartitionEvent.java    |     70 -
 .../events/DropSchemaVersionEvent.java          |     40 -
 .../hive/metastore/events/DropTableEvent.java   |     54 -
 .../hive/metastore/events/EventCleanerTask.java |     66 -
 .../hive/metastore/events/InsertEvent.java      |    132 -
 .../hive/metastore/events/ListenerEvent.java    |    187 -
 .../events/LoadPartitionDoneEvent.java          |     57 -
 .../hive/metastore/events/OpenTxnEvent.java     |     51 -
 .../metastore/events/PreAddPartitionEvent.java  |     79 -
 .../events/PreAddSchemaVersionEvent.java        |     39 -
 .../metastore/events/PreAlterCatalogEvent.java  |     40 -
 .../metastore/events/PreAlterDatabaseEvent.java |     47 -
 .../metastore/events/PreAlterISchemaEvent.java  |     44 -
 .../events/PreAlterPartitionEvent.java          |     65 -
 .../events/PreAlterSchemaVersionEvent.java      |     45 -
 .../metastore/events/PreAlterTableEvent.java    |     53 -
 .../events/PreAuthorizationCallEvent.java       |     33 -
 .../metastore/events/PreCreateCatalogEvent.java |     39 -
 .../events/PreCreateDatabaseEvent.java          |     43 -
 .../metastore/events/PreCreateISchemaEvent.java |     39 -
 .../metastore/events/PreCreateTableEvent.java   |     43 -
 .../metastore/events/PreDropCatalogEvent.java   |     39 -
 .../metastore/events/PreDropDatabaseEvent.java  |     43 -
 .../metastore/events/PreDropISchemaEvent.java   |     39 -
 .../metastore/events/PreDropPartitionEvent.java |     67 -
 .../events/PreDropSchemaVersionEvent.java       |     39 -
 .../metastore/events/PreDropTableEvent.java     |     55 -
 .../hive/metastore/events/PreEventContext.java  |     82 -
 .../events/PreLoadPartitionDoneEvent.java       |     64 -
 .../metastore/events/PreReadCatalogEvent.java   |     39 -
 .../metastore/events/PreReadDatabaseEvent.java  |     46 -
 .../metastore/events/PreReadISchemaEvent.java   |     39 -
 .../metastore/events/PreReadTableEvent.java     |     47 -
 .../events/PreReadhSchemaVersionEvent.java      |     36 -
 .../metastore/hooks/JDOConnectionURLHook.java   |     52 -
 .../hive/metastore/hooks/URIResolverHook.java   |     37 -
 .../metastore/messaging/AbortTxnMessage.java    |     36 -
 .../metastore/messaging/AcidWriteMessage.java   |     50 -
 .../messaging/AddForeignKeyMessage.java         |     36 -
 .../messaging/AddNotNullConstraintMessage.java  |     36 -
 .../messaging/AddPartitionMessage.java          |     68 -
 .../messaging/AddPrimaryKeyMessage.java         |     35 -
 .../messaging/AddUniqueConstraintMessage.java   |     36 -
 .../messaging/AllocWriteIdMessage.java          |     36 -
 .../messaging/AlterCatalogMessage.java          |     29 -
 .../messaging/AlterDatabaseMessage.java         |     36 -
 .../messaging/AlterPartitionMessage.java        |     69 -
 .../metastore/messaging/AlterTableMessage.java  |     58 -
 .../metastore/messaging/CommitTxnMessage.java   |     59 -
 .../messaging/CreateCatalogMessage.java         |     25 -
 .../messaging/CreateDatabaseMessage.java        |     31 -
 .../messaging/CreateFunctionMessage.java        |     46 -
 .../metastore/messaging/CreateTableMessage.java |     53 -
 .../metastore/messaging/DropCatalogMessage.java |     25 -
 .../messaging/DropConstraintMessage.java        |     29 -
 .../messaging/DropDatabaseMessage.java          |     27 -
 .../messaging/DropFunctionMessage.java          |     38 -
 .../messaging/DropPartitionMessage.java         |     49 -
 .../metastore/messaging/DropTableMessage.java   |     46 -
 .../hive/metastore/messaging/EventMessage.java  |    127 -
 .../hive/metastore/messaging/EventUtils.java    |    202 -
 .../hive/metastore/messaging/InsertMessage.java |     75 -
 .../messaging/MessageDeserializer.java          |    200 -
 .../metastore/messaging/MessageFactory.java     |    341 -
 .../metastore/messaging/OpenTxnMessage.java     |     38 -
 .../metastore/messaging/PartitionFiles.java     |     53 -
 .../messaging/event/filters/AndFilter.java      |     39 -
 .../messaging/event/filters/BasicFilter.java    |     33 -
 .../event/filters/DatabaseAndTableFilter.java   |     65 -
 .../event/filters/EventBoundaryFilter.java      |     34 -
 .../event/filters/MessageFormatFilter.java      |     36 -
 .../messaging/json/JSONAbortTxnMessage.java     |     88 -
 .../messaging/json/JSONAcidWriteMessage.java    |    150 -
 .../json/JSONAddForeignKeyMessage.java          |    102 -
 .../json/JSONAddNotNullConstraintMessage.java   |     97 -
 .../messaging/json/JSONAddPartitionMessage.java |    175 -
 .../json/JSONAddPrimaryKeyMessage.java          |    102 -
 .../json/JSONAddUniqueConstraintMessage.java    |     99 -
 .../messaging/json/JSONAllocWriteIdMessage.java |    113 -
 .../messaging/json/JSONAlterCatalogMessage.java |     90 -
 .../json/JSONAlterDatabaseMessage.java          |     97 -
 .../json/JSONAlterPartitionMessage.java         |    153 -
 .../messaging/json/JSONAlterTableMessage.java   |    128 -
 .../messaging/json/JSONCommitTxnMessage.java    |    183 -
 .../json/JSONCreateCatalogMessage.java          |     80 -
 .../json/JSONCreateDatabaseMessage.java         |     85 -
 .../json/JSONCreateFunctionMessage.java         |     87 -
 .../messaging/json/JSONCreateTableMessage.java  |    134 -
 .../messaging/json/JSONDropCatalogMessage.java  |     67 -
 .../json/JSONDropConstraintMessage.java         |     91 -
 .../messaging/json/JSONDropDatabaseMessage.java |     72 -
 .../messaging/json/JSONDropFunctionMessage.java |     79 -
 .../json/JSONDropPartitionMessage.java          |    135 -
 .../messaging/json/JSONDropTableMessage.java    |    121 -
 .../messaging/json/JSONInsertMessage.java       |    148 -
 .../messaging/json/JSONMessageDeserializer.java |    273 -
 .../messaging/json/JSONMessageFactory.java      |    402 -
 .../messaging/json/JSONOpenTxnMessage.java      |    106 -
 .../hive/metastore/metrics/JsonReporter.java    |    223 -
 .../hive/metastore/metrics/JvmPauseMonitor.java |    222 -
 .../hadoop/hive/metastore/metrics/Metrics.java  |    244 -
 .../metastore/metrics/MetricsConstants.java     |     46 -
 .../hive/metastore/metrics/PerfLogger.java      |    194 -
 .../hadoop/hive/metastore/model/MCatalog.java   |     58 -
 .../hive/metastore/model/MColumnDescriptor.java |     51 -
 .../hive/metastore/model/MConstraint.java       |    214 -
 .../hive/metastore/model/MCreationMetadata.java |     87 -
 .../hive/metastore/model/MDBPrivilege.java      |    142 -
 .../hadoop/hive/metastore/model/MDatabase.java  |    157 -
 .../hive/metastore/model/MDelegationToken.java  |     45 -
 .../hive/metastore/model/MFieldSchema.java      |     80 -
 .../hadoop/hive/metastore/model/MFunction.java  |    119 -
 .../hive/metastore/model/MGlobalPrivilege.java  |    130 -
 .../hadoop/hive/metastore/model/MISchema.java   |    107 -
 .../hadoop/hive/metastore/model/MIndex.java     |    200 -
 .../hadoop/hive/metastore/model/MMasterKey.java |     55 -
 .../metastore/model/MMetastoreDBProperties.java |     56 -
 .../hive/metastore/model/MNotificationLog.java  |    108 -
 .../metastore/model/MNotificationNextId.java    |     42 -
 .../hadoop/hive/metastore/model/MOrder.java     |     62 -
 .../hadoop/hive/metastore/model/MPartition.java |    155 -
 .../model/MPartitionColumnPrivilege.java        |    171 -
 .../model/MPartitionColumnStatistics.java       |    281 -
 .../hive/metastore/model/MPartitionEvent.java   |     97 -
 .../metastore/model/MPartitionPrivilege.java    |    149 -
 .../hive/metastore/model/MPrincipalDesc.java    |     59 -
 .../hive/metastore/model/MResourceUri.java      |     49 -
 .../hadoop/hive/metastore/model/MRole.java      |     80 -
 .../hadoop/hive/metastore/model/MRoleMap.java   |    120 -
 .../hive/metastore/model/MRuntimeStat.java      |     59 -
 .../hive/metastore/model/MSchemaVersion.java    |    127 -
 .../hadoop/hive/metastore/model/MSerDeInfo.java |    127 -
 .../metastore/model/MStorageDescriptor.java     |    277 -
 .../hive/metastore/model/MStringList.java       |     62 -
 .../hadoop/hive/metastore/model/MTable.java     |    273 -
 .../metastore/model/MTableColumnPrivilege.java  |    170 -
 .../metastore/model/MTableColumnStatistics.java |    272 -
 .../hive/metastore/model/MTablePrivilege.java   |    149 -
 .../model/MTxnWriteNotificationLog.java         |    123 -
 .../hadoop/hive/metastore/model/MType.java      |    105 -
 .../hive/metastore/model/MVersionTable.java     |     57 -
 .../hadoop/hive/metastore/model/MWMMapping.java |     83 -
 .../hadoop/hive/metastore/model/MWMPool.java    |     89 -
 .../hive/metastore/model/MWMResourcePlan.java   |    105 -
 .../hadoop/hive/metastore/model/MWMTrigger.java |     89 -
 .../hive/metastore/parser/ExpressionTree.java   |    606 -
 .../hadoop/hive/metastore/parser/Filter.g       |    486 -
 .../hive/metastore/parser/package-info.java     |     23 -
 .../spec/CompositePartitionSpecProxy.java       |    258 -
 .../spec/PartitionListComposingSpecProxy.java   |    209 -
 .../partition/spec/PartitionSpecProxy.java      |    220 -
 .../spec/PartitionSpecWithSharedSDProxy.java    |    192 -
 .../hive/metastore/security/DBTokenStore.java   |    180 -
 .../security/DelegationTokenIdentifier.java     |     52 -
 .../security/DelegationTokenSecretManager.java  |    134 -
 .../security/DelegationTokenSelector.java       |     33 -
 .../security/DelegationTokenStore.java          |    116 -
 .../metastore/security/DelegationTokenTool.java |    252 -
 .../security/HadoopThriftAuthBridge.java        |    700 -
 .../security/HadoopThriftAuthBridge23.java      |    114 -
 .../metastore/security/MemoryTokenStore.java    |    118 -
 .../MetastoreDelegationTokenManager.java        |    180 -
 .../metastore/security/TFilterTransport.java    |     99 -
 .../security/TUGIAssumingTransport.java         |     73 -
 .../security/TUGIContainingTransport.java       |     96 -
 .../TokenStoreDelegationTokenSecretManager.java |    334 -
 .../metastore/security/ZooKeeperTokenStore.java |    474 -
 .../hive/metastore/tools/HiveMetaTool.java      |    490 -
 .../hive/metastore/tools/HiveSchemaHelper.java  |    673 -
 .../metastore/tools/MetastoreSchemaTool.java    |    460 -
 .../hive/metastore/tools/SQLGenerator.java      |    187 -
 .../metastore/tools/SchemaToolCommandLine.java  |    308 -
 .../hive/metastore/tools/SchemaToolTask.java    |     32 -
 .../tools/SchemaToolTaskAlterCatalog.java       |     90 -
 .../tools/SchemaToolTaskCreateCatalog.java      |    132 -
 .../tools/SchemaToolTaskCreateUser.java         |    115 -
 .../metastore/tools/SchemaToolTaskInfo.java     |     43 -
 .../metastore/tools/SchemaToolTaskInit.java     |     73 -
 .../tools/SchemaToolTaskMoveDatabase.java       |     96 -
 .../tools/SchemaToolTaskMoveTable.java          |    142 -
 .../metastore/tools/SchemaToolTaskUpgrade.java  |    116 -
 .../metastore/tools/SchemaToolTaskValidate.java |    630 -
 .../hadoop/hive/metastore/tools/SmokeTest.java  |    102 -
 .../txn/AcidCompactionHistoryService.java       |     71 -
 .../metastore/txn/AcidHouseKeeperService.java   |     71 -
 .../txn/AcidOpenTxnsCounterService.java         |     72 -
 .../hive/metastore/txn/AcidWriteSetService.java |     69 -
 .../hive/metastore/txn/CompactionInfo.java      |    170 -
 .../metastore/txn/CompactionTxnHandler.java     |   1107 -
 .../hadoop/hive/metastore/txn/TxnDbUtil.java    |    505 -
 .../hadoop/hive/metastore/txn/TxnHandler.java   |   4906 -
 .../hadoop/hive/metastore/txn/TxnStore.java     |    490 -
 .../hadoop/hive/metastore/txn/TxnUtils.java     |    471 -
 .../hive/metastore/utils/CommonCliOptions.java  |    160 -
 .../hadoop/hive/metastore/utils/FileUtils.java  |    537 -
 .../hadoop/hive/metastore/utils/HdfsUtils.java  |    395 -
 .../metastore/utils/HiveStrictManagedUtils.java |    100 -
 .../hadoop/hive/metastore/utils/JavaUtils.java  |    130 -
 .../hadoop/hive/metastore/utils/LogUtils.java   |    140 -
 .../hive/metastore/utils/MetaStoreUtils.java    |   1840 -
 .../metastore/utils/MetastoreVersionInfo.java   |    133 -
 .../hadoop/hive/metastore/utils/ObjectPair.java |     86 -
 .../hive/metastore/utils/SecurityUtils.java     |    313 -
 .../hive/metastore/utils/StringUtils.java       |    130 -
 .../hive/metastore/utils/StringableMap.java     |     80 -
 .../MetastoreDelegationTokenSupport.java        |     68 -
 .../hadoop/hive/metastore/metastore.proto       |     29 -
 .../main/resources/datanucleus-log4j.properties |     17 -
 .../main/resources/metastore-log4j2.properties  |     71 -
 .../src/main/resources/metastore-site.xml       |     34 -
 .../src/main/resources/package.jdo              |   1420 -
 .../src/main/resources/saveVersion.sh           |     91 -
 .../src/main/resources/thrift-replacements.txt  |    106 -
 standalone-metastore/src/main/scripts/base      |    231 -
 .../src/main/scripts/ext/metastore.sh           |     41 -
 .../src/main/scripts/ext/schemaTool.sh          |     33 -
 .../src/main/scripts/ext/smokeTest.sh           |     33 -
 .../src/main/scripts/metastore-config.sh        |     69 -
 .../src/main/scripts/schematool                 |     21 -
 .../src/main/scripts/start-metastore            |     22 -
 .../main/sql/derby/hive-schema-1.2.0.derby.sql  |    405 -
 .../main/sql/derby/hive-schema-3.0.0.derby.sql  |    692 -
 .../main/sql/derby/hive-schema-3.1.0.derby.sql  |    710 -
 .../main/sql/derby/hive-schema-4.0.0.derby.sql  |    710 -
 .../sql/derby/upgrade-1.2.0-to-2.0.0.derby.sql  |     62 -
 .../sql/derby/upgrade-2.0.0-to-2.1.0.derby.sql  |     22 -
 .../sql/derby/upgrade-2.1.0-to-2.2.0.derby.sql  |     59 -
 .../sql/derby/upgrade-2.2.0-to-2.3.0.derby.sql  |      5 -
 .../sql/derby/upgrade-2.3.0-to-3.0.0.derby.sql  |    283 -
 .../sql/derby/upgrade-3.0.0-to-3.1.0.derby.sql  |     49 -
 .../sql/derby/upgrade-3.1.0-to-4.0.0.derby.sql  |      6 -
 .../src/main/sql/derby/upgrade.order.derby      |     18 -
 .../src/main/sql/mssql/create-user.mssql.sql    |      5 -
 .../main/sql/mssql/hive-schema-1.2.0.mssql.sql  |    947 -
 .../main/sql/mssql/hive-schema-3.0.0.mssql.sql  |   1246 -
 .../main/sql/mssql/hive-schema-3.1.0.mssql.sql  |   1271 -
 .../main/sql/mssql/hive-schema-4.0.0.mssql.sql  |   1272 -
 .../sql/mssql/upgrade-1.2.0-to-2.0.0.mssql.sql  |     73 -
 .../sql/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql  |     39 -
 .../sql/mssql/upgrade-2.1.0-to-2.2.0.mssql.sql  |     43 -
 .../sql/mssql/upgrade-2.2.0-to-2.3.0.mssql.sql  |      7 -
 .../sql/mssql/upgrade-2.3.0-to-3.0.0.mssql.sql  |    352 -
 .../sql/mssql/upgrade-3.0.0-to-3.1.0.mssql.sql  |     51 -
 .../sql/mssql/upgrade-3.1.0-to-4.0.0.mssql.sql  |      6 -
 .../src/main/sql/mssql/upgrade.order.mssql      |     12 -
 .../src/main/sql/mysql/create-user.mysql.sql    |      8 -
 .../main/sql/mysql/hive-schema-1.2.0.mysql.sql  |    910 -
 .../main/sql/mysql/hive-schema-3.0.0.mysql.sql  |   1183 -
 .../main/sql/mysql/hive-schema-3.1.0.mysql.sql  |   1208 -
 .../main/sql/mysql/hive-schema-4.0.0.mysql.sql  |   1208 -
 .../sql/mysql/upgrade-1.2.0-to-2.0.0.mysql.sql  |     75 -
 .../sql/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql  |     42 -
 .../sql/mysql/upgrade-2.1.0-to-2.2.0.mysql.sql  |     43 -
 .../sql/mysql/upgrade-2.2.0-to-2.3.0.mysql.sql  |      8 -
 .../sql/mysql/upgrade-2.3.0-to-3.0.0.mysql.sql  |    326 -
 .../sql/mysql/upgrade-3.0.0-to-3.1.0.mysql.sql  |     51 -
 .../sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql  |      6 -
 .../src/main/sql/mysql/upgrade.order.mysql      |     18 -
 .../src/main/sql/oracle/create-user.oracle.sql  |      3 -
 .../sql/oracle/hive-schema-1.2.0.oracle.sql     |    856 -
 .../sql/oracle/hive-schema-3.0.0.oracle.sql     |   1140 -
 .../sql/oracle/hive-schema-3.1.0.oracle.sql     |   1165 -
 .../sql/oracle/hive-schema-4.0.0.oracle.sql     |   1165 -
 .../oracle/upgrade-1.2.0-to-2.0.0.oracle.sql    |     83 -
 .../oracle/upgrade-2.0.0-to-2.1.0.oracle.sql    |     39 -
 .../oracle/upgrade-2.1.0-to-2.2.0.oracle.sql    |     58 -
 .../oracle/upgrade-2.2.0-to-2.3.0.oracle.sql    |      7 -
 .../oracle/upgrade-2.3.0-to-3.0.0.oracle.sql    |    342 -
 .../oracle/upgrade-3.0.0-to-3.1.0.oracle.sql    |     51 -
 .../oracle/upgrade-3.1.0-to-4.0.0.oracle.sql    |      6 -
 .../src/main/sql/oracle/upgrade.order.oracle    |     14 -
 .../main/sql/postgres/create-user.postgres.sql  |      2 -
 .../sql/postgres/hive-schema-1.2.0.postgres.sql |   1562 -
 .../sql/postgres/hive-schema-3.0.0.postgres.sql |   1827 -
 .../sql/postgres/hive-schema-3.1.0.postgres.sql |   1856 -
 .../sql/postgres/hive-schema-4.0.0.postgres.sql |   1858 -
 .../upgrade-1.2.0-to-2.0.0.postgres.sql         |     73 -
 .../upgrade-2.0.0-to-2.1.0.postgres.sql         |     40 -
 .../upgrade-2.1.0-to-2.2.0.postgres.sql         |     39 -
 .../upgrade-2.2.0-to-2.3.0.postgres.sql         |      8 -
 .../upgrade-2.3.0-to-3.0.0.postgres.sql         |    360 -
 .../upgrade-3.0.0-to-3.1.0.postgres.sql         |     53 -
 .../upgrade-3.1.0-to-4.0.0.postgres.sql         |      6 -
 .../main/sql/postgres/upgrade.order.postgres    |     18 -
 .../src/main/thrift/hive_metastore.thrift       |   2275 -
 .../hadoop/hive/common/TestStatsSetupConst.java |    114 -
 .../ndv/fm/TestFMSketchSerialization.java       |    101 -
 .../hive/common/ndv/hll/TestHLLNoBias.java      |    117 -
 .../common/ndv/hll/TestHLLSerialization.java    |    270 -
 .../hive/common/ndv/hll/TestHyperLogLog.java    |    338 -
 .../common/ndv/hll/TestHyperLogLogDense.java    |     85 -
 .../common/ndv/hll/TestHyperLogLogMerge.java    |    147 -
 .../common/ndv/hll/TestHyperLogLogSparse.java   |     84 -
 .../common/ndv/hll/TestSparseEncodeHash.java    |     59 -
 .../metastore/AlternateFailurePreListener.java  |     62 -
 .../metastore/DummyEndFunctionListener.java     |     47 -
 .../metastore/DummyJdoConnectionUrlHook.java    |     45 -
 .../hadoop/hive/metastore/DummyListener.java    |    126 -
 .../metastore/DummyMetaStoreInitListener.java   |     39 -
 .../hadoop/hive/metastore/DummyPreListener.java |     49 -
 .../DummyRawStoreControlledCommit.java          |   1226 -
 .../DummyRawStoreForJdoConnection.java          |   1212 -
 .../apache/hadoop/hive/metastore/FakeDerby.java |    404 -
 .../HiveMetaStoreClientPreCatalog.java          |   3427 -
 .../InjectableBehaviourObjectStore.java         |    211 -
 .../hive/metastore/IpAddressListener.java       |    102 -
 .../hive/metastore/MetaStoreTestUtils.java      |    291 -
 .../MockPartitionExpressionForMetastore.java    |     58 -
 .../hive/metastore/NonCatCallsWithCatalog.java  |   1158 -
 .../hadoop/hive/metastore/TestAdminUser.java    |     49 -
 .../hive/metastore/TestAggregateStatsCache.java |    272 -
 .../metastore/TestCatalogNonDefaultClient.java  |     74 -
 .../metastore/TestCatalogNonDefaultSvr.java     |     68 -
 .../hive/metastore/TestCatalogOldClient.java    |     44 -
 .../hadoop/hive/metastore/TestDeadline.java     |    130 -
 .../metastore/TestEmbeddedHiveMetaStore.java    |     51 -
 .../hadoop/hive/metastore/TestFilterHooks.java  |    254 -
 .../hive/metastore/TestHiveAlterHandler.java    |    121 -
 .../hive/metastore/TestHiveMetaStore.java       |   3103 -
 .../metastore/TestHiveMetaStoreGetMetaConf.java |    115 -
 .../TestHiveMetaStorePartitionSpecs.java        |    383 -
 .../TestHiveMetaStoreSchemaMethods.java         |   1248 -
 .../metastore/TestHiveMetaStoreTimeout.java     |    142 -
 .../hive/metastore/TestHiveMetaStoreTxns.java   |    267 -
 ...TestHiveMetaStoreWithEnvironmentContext.java |    191 -
 .../hive/metastore/TestHiveMetastoreCli.java    |     68 -
 .../hive/metastore/TestLockRequestBuilder.java  |    587 -
 .../hive/metastore/TestMarkPartition.java       |    118 -
 .../hive/metastore/TestMarkPartitionRemote.java |     34 -
 .../TestMetaStoreConnectionUrlHook.java         |     49 -
 .../TestMetaStoreEndFunctionListener.java       |    146 -
 .../metastore/TestMetaStoreEventListener.java   |    471 -
 .../TestMetaStoreEventListenerOnlyOnCommit.java |    121 -
 .../TestMetaStoreEventListenerWithOldConf.java  |    129 -
 .../metastore/TestMetaStoreInitListener.java    |     56 -
 .../metastore/TestMetaStoreListenersError.java  |     97 -
 ...stMetaStoreMaterializationsCacheCleaner.java |    328 -
 .../metastore/TestMetaStoreSchemaFactory.java   |     72 -
 .../hive/metastore/TestMetaStoreSchemaInfo.java |     55 -
 .../hadoop/hive/metastore/TestObjectStore.java  |    904 -
 .../metastore/TestObjectStoreInitRetry.java     |    135 -
 .../metastore/TestObjectStoreSchemaMethods.java |    602 -
 .../hadoop/hive/metastore/TestOldSchema.java    |    233 -
 .../TestPartitionNameWhitelistValidation.java   |    125 -
 .../hive/metastore/TestRawStoreProxy.java       |     67 -
 .../hive/metastore/TestRemoteHiveMetaStore.java |     64 -
 .../TestRemoteHiveMetaStoreIpAddress.java       |     66 -
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |     31 -
 .../TestRetriesInRetryingHMSHandler.java        |    111 -
 .../hive/metastore/TestRetryingHMSHandler.java  |     82 -
 .../metastore/TestSetUGIOnBothClientServer.java |     34 -
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |     35 -
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |     35 -
 .../apache/hadoop/hive/metastore/TestStats.java |    732 -
 .../hive/metastore/VerifyingObjectStore.java    |    219 -
 .../annotation/MetastoreCheckinTest.java        |     25 -
 .../metastore/annotation/MetastoreTest.java     |     24 -
 .../metastore/annotation/MetastoreUnitTest.java |     25 -
 .../hive/metastore/cache/TestCachedStore.java   |   1075 -
 .../metastore/cache/TestCatalogCaching.java     |    142 -
 .../metastore/client/MetaStoreClientTest.java   |     95 -
 .../client/MetaStoreFactoryForTests.java        |    112 -
 .../metastore/client/TestAddPartitions.java     |   1736 -
 .../client/TestAddPartitionsFromPartSpec.java   |   1267 -
 .../metastore/client/TestAlterPartitions.java   |   1117 -
 .../metastore/client/TestAppendPartitions.java  |    594 -
 .../hive/metastore/client/TestCatalogs.java     |    267 -
 .../metastore/client/TestCheckConstraint.java   |    363 -
 .../hive/metastore/client/TestDatabases.java    |    634 -
 .../metastore/client/TestDefaultConstraint.java |    363 -
 .../metastore/client/TestDropPartitions.java    |    659 -
 .../client/TestExchangePartitions.java          |   1337 -
 .../hive/metastore/client/TestForeignKey.java   |    538 -
 .../hive/metastore/client/TestFunctions.java    |    765 -
 .../metastore/client/TestGetPartitions.java     |    608 -
 .../hive/metastore/client/TestGetTableMeta.java |    330 -
 .../metastore/client/TestListPartitions.java    |   1522 -
 .../metastore/client/TestNotNullConstraint.java |    355 -
 .../hive/metastore/client/TestPrimaryKey.java   |    468 -
 .../hive/metastore/client/TestRuntimeStats.java |    154 -
 .../TestTablesCreateDropAlterTruncate.java      |   1384 -
 .../metastore/client/TestTablesGetExists.java   |    514 -
 .../hive/metastore/client/TestTablesList.java   |    320 -
 .../metastore/client/TestUniqueConstraint.java  |    356 -
 .../hive/metastore/client/package-info.java     |     22 -
 .../merge/DecimalColumnStatsMergerTest.java     |    235 -
 .../hive/metastore/conf/TestMetastoreConf.java  |    433 -
 .../TestDataSourceProviderFactory.java          |    248 -
 .../hive/metastore/dbinstall/DbInstallBase.java |    265 -
 .../hive/metastore/dbinstall/ITestMysql.java    |     82 -
 .../hive/metastore/dbinstall/ITestOracle.java   |     83 -
 .../hive/metastore/dbinstall/ITestPostgres.java |     82 -
 .../metastore/dbinstall/ITestSqlServer.java     |     84 -
 .../json/TestJSONMessageDeserializer.java       |    115 -
 .../hive/metastore/metrics/TestMetrics.java     |    164 -
 .../minihms/AbstractMetaStoreService.java       |    173 -
 .../minihms/ClusterMetaStoreForTests.java       |     32 -
 .../minihms/EmbeddedMetaStoreForTests.java      |     33 -
 .../hadoop/hive/metastore/minihms/MiniHMS.java  |     76 -
 .../minihms/RemoteMetaStoreForTests.java        |     43 -
 .../hive/metastore/minihms/package-info.java    |     23 -
 .../tools/TestMetastoreSchemaTool.java          |     70 -
 .../tools/TestSchemaToolForMetastore.java       |    534 -
 .../metastore/txn/TestTxnHandlerNegative.java   |     58 -
 .../hadoop/hive/metastore/txn/TestTxnUtils.java |    239 -
 .../hive/metastore/utils/TestHdfsUtils.java     |    348 -
 .../metastore/utils/TestMetaStoreUtils.java     |    291 -
 .../src/test/resources/log4j2.properties        |     35 -
 upgrade-acid/pom.xml                            |      2 +-
 1748 files changed, 914815 insertions(+), 914572 deletions(-)
----------------------------------------------------------------------



[42/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
new file mode 100644
index 0000000..dd3a127
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java
@@ -0,0 +1,955 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddPartitionsRequest implements org.apache.thrift.TBase<AddPartitionsRequest, AddPartitionsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AddPartitionsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("parts", org.apache.thrift.protocol.TType.LIST, (short)3);
+  private static final org.apache.thrift.protocol.TField IF_NOT_EXISTS_FIELD_DESC = new org.apache.thrift.protocol.TField("ifNotExists", org.apache.thrift.protocol.TType.BOOL, (short)4);
+  private static final org.apache.thrift.protocol.TField NEED_RESULT_FIELD_DESC = new org.apache.thrift.protocol.TField("needResult", org.apache.thrift.protocol.TType.BOOL, (short)5);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AddPartitionsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AddPartitionsRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tblName; // required
+  private List<Partition> parts; // required
+  private boolean ifNotExists; // required
+  private boolean needResult; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TBL_NAME((short)2, "tblName"),
+    PARTS((short)3, "parts"),
+    IF_NOT_EXISTS((short)4, "ifNotExists"),
+    NEED_RESULT((short)5, "needResult"),
+    CAT_NAME((short)6, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // PARTS
+          return PARTS;
+        case 4: // IF_NOT_EXISTS
+          return IF_NOT_EXISTS;
+        case 5: // NEED_RESULT
+          return NEED_RESULT;
+        case 6: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __IFNOTEXISTS_ISSET_ID = 0;
+  private static final int __NEEDRESULT_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.NEED_RESULT,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARTS, new org.apache.thrift.meta_data.FieldMetaData("parts", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
+    tmpMap.put(_Fields.IF_NOT_EXISTS, new org.apache.thrift.meta_data.FieldMetaData("ifNotExists", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.NEED_RESULT, new org.apache.thrift.meta_data.FieldMetaData("needResult", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsRequest.class, metaDataMap);
+  }
+
+  public AddPartitionsRequest() {
+    this.needResult = true;
+
+  }
+
+  public AddPartitionsRequest(
+    String dbName,
+    String tblName,
+    List<Partition> parts,
+    boolean ifNotExists)
+  {
+    this();
+    this.dbName = dbName;
+    this.tblName = tblName;
+    this.parts = parts;
+    this.ifNotExists = ifNotExists;
+    setIfNotExistsIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AddPartitionsRequest(AddPartitionsRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTblName()) {
+      this.tblName = other.tblName;
+    }
+    if (other.isSetParts()) {
+      List<Partition> __this__parts = new ArrayList<Partition>(other.parts.size());
+      for (Partition other_element : other.parts) {
+        __this__parts.add(new Partition(other_element));
+      }
+      this.parts = __this__parts;
+    }
+    this.ifNotExists = other.ifNotExists;
+    this.needResult = other.needResult;
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public AddPartitionsRequest deepCopy() {
+    return new AddPartitionsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tblName = null;
+    this.parts = null;
+    setIfNotExistsIsSet(false);
+    this.ifNotExists = false;
+    this.needResult = true;
+
+    this.catName = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTblName() {
+    return this.tblName;
+  }
+
+  public void setTblName(String tblName) {
+    this.tblName = tblName;
+  }
+
+  public void unsetTblName() {
+    this.tblName = null;
+  }
+
+  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblName() {
+    return this.tblName != null;
+  }
+
+  public void setTblNameIsSet(boolean value) {
+    if (!value) {
+      this.tblName = null;
+    }
+  }
+
+  public int getPartsSize() {
+    return (this.parts == null) ? 0 : this.parts.size();
+  }
+
+  public java.util.Iterator<Partition> getPartsIterator() {
+    return (this.parts == null) ? null : this.parts.iterator();
+  }
+
+  public void addToParts(Partition elem) {
+    if (this.parts == null) {
+      this.parts = new ArrayList<Partition>();
+    }
+    this.parts.add(elem);
+  }
+
+  public List<Partition> getParts() {
+    return this.parts;
+  }
+
+  public void setParts(List<Partition> parts) {
+    this.parts = parts;
+  }
+
+  public void unsetParts() {
+    this.parts = null;
+  }
+
+  /** Returns true if field parts is set (has been assigned a value) and false otherwise */
+  public boolean isSetParts() {
+    return this.parts != null;
+  }
+
+  public void setPartsIsSet(boolean value) {
+    if (!value) {
+      this.parts = null;
+    }
+  }
+
+  public boolean isIfNotExists() {
+    return this.ifNotExists;
+  }
+
+  public void setIfNotExists(boolean ifNotExists) {
+    this.ifNotExists = ifNotExists;
+    setIfNotExistsIsSet(true);
+  }
+
+  public void unsetIfNotExists() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __IFNOTEXISTS_ISSET_ID);
+  }
+
+  /** Returns true if field ifNotExists is set (has been assigned a value) and false otherwise */
+  public boolean isSetIfNotExists() {
+    return EncodingUtils.testBit(__isset_bitfield, __IFNOTEXISTS_ISSET_ID);
+  }
+
+  public void setIfNotExistsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __IFNOTEXISTS_ISSET_ID, value);
+  }
+
+  public boolean isNeedResult() {
+    return this.needResult;
+  }
+
+  public void setNeedResult(boolean needResult) {
+    this.needResult = needResult;
+    setNeedResultIsSet(true);
+  }
+
+  public void unsetNeedResult() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NEEDRESULT_ISSET_ID);
+  }
+
+  /** Returns true if field needResult is set (has been assigned a value) and false otherwise */
+  public boolean isSetNeedResult() {
+    return EncodingUtils.testBit(__isset_bitfield, __NEEDRESULT_ISSET_ID);
+  }
+
+  public void setNeedResultIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NEEDRESULT_ISSET_ID, value);
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTblName();
+      } else {
+        setTblName((String)value);
+      }
+      break;
+
+    case PARTS:
+      if (value == null) {
+        unsetParts();
+      } else {
+        setParts((List<Partition>)value);
+      }
+      break;
+
+    case IF_NOT_EXISTS:
+      if (value == null) {
+        unsetIfNotExists();
+      } else {
+        setIfNotExists((Boolean)value);
+      }
+      break;
+
+    case NEED_RESULT:
+      if (value == null) {
+        unsetNeedResult();
+      } else {
+        setNeedResult((Boolean)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TBL_NAME:
+      return getTblName();
+
+    case PARTS:
+      return getParts();
+
+    case IF_NOT_EXISTS:
+      return isIfNotExists();
+
+    case NEED_RESULT:
+      return isNeedResult();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TBL_NAME:
+      return isSetTblName();
+    case PARTS:
+      return isSetParts();
+    case IF_NOT_EXISTS:
+      return isSetIfNotExists();
+    case NEED_RESULT:
+      return isSetNeedResult();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AddPartitionsRequest)
+      return this.equals((AddPartitionsRequest)that);
+    return false;
+  }
+
+  public boolean equals(AddPartitionsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tblName = true && this.isSetTblName();
+    boolean that_present_tblName = true && that.isSetTblName();
+    if (this_present_tblName || that_present_tblName) {
+      if (!(this_present_tblName && that_present_tblName))
+        return false;
+      if (!this.tblName.equals(that.tblName))
+        return false;
+    }
+
+    boolean this_present_parts = true && this.isSetParts();
+    boolean that_present_parts = true && that.isSetParts();
+    if (this_present_parts || that_present_parts) {
+      if (!(this_present_parts && that_present_parts))
+        return false;
+      if (!this.parts.equals(that.parts))
+        return false;
+    }
+
+    boolean this_present_ifNotExists = true;
+    boolean that_present_ifNotExists = true;
+    if (this_present_ifNotExists || that_present_ifNotExists) {
+      if (!(this_present_ifNotExists && that_present_ifNotExists))
+        return false;
+      if (this.ifNotExists != that.ifNotExists)
+        return false;
+    }
+
+    boolean this_present_needResult = true && this.isSetNeedResult();
+    boolean that_present_needResult = true && that.isSetNeedResult();
+    if (this_present_needResult || that_present_needResult) {
+      if (!(this_present_needResult && that_present_needResult))
+        return false;
+      if (this.needResult != that.needResult)
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tblName = true && (isSetTblName());
+    list.add(present_tblName);
+    if (present_tblName)
+      list.add(tblName);
+
+    boolean present_parts = true && (isSetParts());
+    list.add(present_parts);
+    if (present_parts)
+      list.add(parts);
+
+    boolean present_ifNotExists = true;
+    list.add(present_ifNotExists);
+    if (present_ifNotExists)
+      list.add(ifNotExists);
+
+    boolean present_needResult = true && (isSetNeedResult());
+    list.add(present_needResult);
+    if (present_needResult)
+      list.add(needResult);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AddPartitionsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetParts()).compareTo(other.isSetParts());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetParts()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parts, other.parts);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetIfNotExists()).compareTo(other.isSetIfNotExists());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetIfNotExists()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ifNotExists, other.ifNotExists);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetNeedResult()).compareTo(other.isSetNeedResult());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetNeedResult()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.needResult, other.needResult);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AddPartitionsRequest(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tblName:");
+    if (this.tblName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tblName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("parts:");
+    if (this.parts == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.parts);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("ifNotExists:");
+    sb.append(this.ifNotExists);
+    first = false;
+    if (isSetNeedResult()) {
+      if (!first) sb.append(", ");
+      sb.append("needResult:");
+      sb.append(this.needResult);
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTblName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetParts()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'parts' is unset! Struct:" + toString());
+    }
+
+    if (!isSetIfNotExists()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'ifNotExists' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // Java serialization bypasses the default constructor, so the isset bitfield must be reset here before reading.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AddPartitionsRequestStandardSchemeFactory implements SchemeFactory {
+    public AddPartitionsRequestStandardScheme getScheme() {
+      return new AddPartitionsRequestStandardScheme();
+    }
+  }
+
+  private static class AddPartitionsRequestStandardScheme extends StandardScheme<AddPartitionsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tblName = iprot.readString();
+              struct.setTblNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // PARTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list482 = iprot.readListBegin();
+                struct.parts = new ArrayList<Partition>(_list482.size);
+                Partition _elem483;
+                for (int _i484 = 0; _i484 < _list482.size; ++_i484)
+                {
+                  _elem483 = new Partition();
+                  _elem483.read(iprot);
+                  struct.parts.add(_elem483);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // IF_NOT_EXISTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.ifNotExists = iprot.readBool();
+              struct.setIfNotExistsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // NEED_RESULT
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.needResult = iprot.readBool();
+              struct.setNeedResultIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblName != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tblName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.parts != null) {
+        oprot.writeFieldBegin(PARTS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.parts.size()));
+          for (Partition _iter485 : struct.parts)
+          {
+            _iter485.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(IF_NOT_EXISTS_FIELD_DESC);
+      oprot.writeBool(struct.ifNotExists);
+      oprot.writeFieldEnd();
+      if (struct.isSetNeedResult()) {
+        oprot.writeFieldBegin(NEED_RESULT_FIELD_DESC);
+        oprot.writeBool(struct.needResult);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AddPartitionsRequestTupleSchemeFactory implements SchemeFactory {
+    public AddPartitionsRequestTupleScheme getScheme() {
+      return new AddPartitionsRequestTupleScheme();
+    }
+  }
+
+  private static class AddPartitionsRequestTupleScheme extends TupleScheme<AddPartitionsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tblName);
+      {
+        oprot.writeI32(struct.parts.size());
+        for (Partition _iter486 : struct.parts)
+        {
+          _iter486.write(oprot);
+        }
+      }
+      oprot.writeBool(struct.ifNotExists);
+      BitSet optionals = new BitSet();
+      if (struct.isSetNeedResult()) {
+        optionals.set(0);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(1);
+      }
+      oprot.writeBitSet(optionals, 2);
+      if (struct.isSetNeedResult()) {
+        oprot.writeBool(struct.needResult);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tblName = iprot.readString();
+      struct.setTblNameIsSet(true);
+      {
+        org.apache.thrift.protocol.TList _list487 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.parts = new ArrayList<Partition>(_list487.size);
+        Partition _elem488;
+        for (int _i489 = 0; _i489 < _list487.size; ++_i489)
+        {
+          _elem488 = new Partition();
+          _elem488.read(iprot);
+          struct.parts.add(_elem488);
+        }
+      }
+      struct.setPartsIsSet(true);
+      struct.ifNotExists = iprot.readBool();
+      struct.setIfNotExistsIsSet(true);
+      BitSet incoming = iprot.readBitSet(2);
+      if (incoming.get(0)) {
+        struct.needResult = iprot.readBool();
+        struct.setNeedResultIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+

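As a usage illustration (not part of the patch): a minimal sketch of driving the generated AddPartitionsRequest API above. Only the AddPartitionsRequest and Partition classes come from this change; the database, table, and catalog names below are hypothetical placeholders.

  // Minimal sketch: build and validate an AddPartitionsRequest.
  // String values are placeholders, not anything mandated by the patch.
  import java.util.ArrayList;
  import java.util.List;
  import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest;
  import org.apache.hadoop.hive.metastore.api.Partition;

  public class AddPartitionsRequestSketch {
    public static void main(String[] args) throws Exception {
      List<Partition> parts = new ArrayList<>();
      parts.add(new Partition()); // would be populated with real partition data

      // The convenience constructor covers the four required fields;
      // ifNotExists is a required bool tracked via the isset bitfield.
      AddPartitionsRequest req =
          new AddPartitionsRequest("default", "web_logs", parts, true);

      // Optional fields: needResult defaults to true, catName starts unset.
      req.setNeedResult(false);
      req.setCatName("hive");

      // validate() throws TProtocolException if any required field is unset.
      req.validate();
      System.out.println(req); // toString() prints optional fields only when set
    }
  }
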
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
new file mode 100644
index 0000000..fe41b8c
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java
@@ -0,0 +1,447 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddPartitionsResult implements org.apache.thrift.TBase<AddPartitionsResult, AddPartitionsResult._Fields>, java.io.Serializable, Cloneable, Comparable<AddPartitionsResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPartitionsResult");
+
+  private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AddPartitionsResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AddPartitionsResultTupleSchemeFactory());
+  }
+
+  private List<Partition> partitions; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PARTITIONS((short)1, "partitions");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PARTITIONS
+          return PARTITIONS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.PARTITIONS};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPartitionsResult.class, metaDataMap);
+  }
+
+  public AddPartitionsResult() {
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AddPartitionsResult(AddPartitionsResult other) {
+    if (other.isSetPartitions()) {
+      List<Partition> __this__partitions = new ArrayList<Partition>(other.partitions.size());
+      for (Partition other_element : other.partitions) {
+        __this__partitions.add(new Partition(other_element));
+      }
+      this.partitions = __this__partitions;
+    }
+  }
+
+  public AddPartitionsResult deepCopy() {
+    return new AddPartitionsResult(this);
+  }
+
+  @Override
+  public void clear() {
+    this.partitions = null;
+  }
+
+  public int getPartitionsSize() {
+    return (this.partitions == null) ? 0 : this.partitions.size();
+  }
+
+  public java.util.Iterator<Partition> getPartitionsIterator() {
+    return (this.partitions == null) ? null : this.partitions.iterator();
+  }
+
+  public void addToPartitions(Partition elem) {
+    if (this.partitions == null) {
+      this.partitions = new ArrayList<Partition>();
+    }
+    this.partitions.add(elem);
+  }
+
+  public List<Partition> getPartitions() {
+    return this.partitions;
+  }
+
+  public void setPartitions(List<Partition> partitions) {
+    this.partitions = partitions;
+  }
+
+  public void unsetPartitions() {
+    this.partitions = null;
+  }
+
+  /** Returns true if field partitions is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitions() {
+    return this.partitions != null;
+  }
+
+  public void setPartitionsIsSet(boolean value) {
+    if (!value) {
+      this.partitions = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PARTITIONS:
+      if (value == null) {
+        unsetPartitions();
+      } else {
+        setPartitions((List<Partition>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PARTITIONS:
+      return getPartitions();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PARTITIONS:
+      return isSetPartitions();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AddPartitionsResult)
+      return this.equals((AddPartitionsResult)that);
+    return false;
+  }
+
+  public boolean equals(AddPartitionsResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_partitions = true && this.isSetPartitions();
+    boolean that_present_partitions = true && that.isSetPartitions();
+    if (this_present_partitions || that_present_partitions) {
+      if (!(this_present_partitions && that_present_partitions))
+        return false;
+      if (!this.partitions.equals(that.partitions))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_partitions = true && (isSetPartitions());
+    list.add(present_partitions);
+    if (present_partitions)
+      list.add(partitions);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AddPartitionsResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitions()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AddPartitionsResult(");
+    boolean first = true;
+
+    if (isSetPartitions()) {
+      sb.append("partitions:");
+      if (this.partitions == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.partitions);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AddPartitionsResultStandardSchemeFactory implements SchemeFactory {
+    public AddPartitionsResultStandardScheme getScheme() {
+      return new AddPartitionsResultStandardScheme();
+    }
+  }
+
+  private static class AddPartitionsResultStandardScheme extends StandardScheme<AddPartitionsResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PARTITIONS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list474 = iprot.readListBegin();
+                struct.partitions = new ArrayList<Partition>(_list474.size);
+                Partition _elem475;
+                for (int _i476 = 0; _i476 < _list474.size; ++_i476)
+                {
+                  _elem475 = new Partition();
+                  _elem475.read(iprot);
+                  struct.partitions.add(_elem475);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.partitions != null) {
+        if (struct.isSetPartitions()) {
+          oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
+            for (Partition _iter477 : struct.partitions)
+            {
+              _iter477.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AddPartitionsResultTupleSchemeFactory implements SchemeFactory {
+    public AddPartitionsResultTupleScheme getScheme() {
+      return new AddPartitionsResultTupleScheme();
+    }
+  }
+
+  private static class AddPartitionsResultTupleScheme extends TupleScheme<AddPartitionsResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetPartitions()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetPartitions()) {
+        {
+          oprot.writeI32(struct.partitions.size());
+          for (Partition _iter478 : struct.partitions)
+          {
+            _iter478.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list479 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.partitions = new ArrayList<Partition>(_list479.size);
+          Partition _elem480;
+          for (int _i481 = 0; _i481 < _list479.size; ++_i481)
+          {
+            _elem480 = new Partition();
+            _elem480.read(iprot);
+            struct.partitions.add(_elem480);
+          }
+        }
+        struct.setPartitionsIsSet(true);
+      }
+    }
+  }
+
+}
+
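
The writeObject/readObject hooks above delegate Java serialization to Thrift's
TCompactProtocol, and the same scheme machinery backs Thrift's TSerializer and
TDeserializer helpers. A minimal round-trip sketch, assuming libthrift 0.9.3 and
the generated classes on the classpath (the class name ThriftRoundTrip is
hypothetical):

import org.apache.hadoop.hive.metastore.api.AddPartitionsResult;
import org.apache.thrift.TDeserializer;
import org.apache.thrift.TSerializer;
import org.apache.thrift.protocol.TCompactProtocol;

public class ThriftRoundTrip {
  public static void main(String[] args) throws Exception {
    // partitions is optional for AddPartitionsResult (its validate() checks
    // nothing), so even an empty struct serializes cleanly.
    AddPartitionsResult result = new AddPartitionsResult();

    TSerializer ser = new TSerializer(new TCompactProtocol.Factory());
    byte[] bytes = ser.serialize(result);

    AddPartitionsResult copy = new AddPartitionsResult();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);

    // equals() compares field presence and values, so the round trip is lossless.
    System.out.println(result.equals(copy)); // true
  }
}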

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java
new file mode 100644
index 0000000..39bb6be
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPrimaryKeyRequest.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddPrimaryKeyRequest implements org.apache.thrift.TBase<AddPrimaryKeyRequest, AddPrimaryKeyRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AddPrimaryKeyRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddPrimaryKeyRequest");
+
+  private static final org.apache.thrift.protocol.TField PRIMARY_KEY_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("primaryKeyCols", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AddPrimaryKeyRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AddPrimaryKeyRequestTupleSchemeFactory());
+  }
+
+  private List<SQLPrimaryKey> primaryKeyCols; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PRIMARY_KEY_COLS((short)1, "primaryKeyCols");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PRIMARY_KEY_COLS
+          return PRIMARY_KEY_COLS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PRIMARY_KEY_COLS, new org.apache.thrift.meta_data.FieldMetaData("primaryKeyCols", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLPrimaryKey.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddPrimaryKeyRequest.class, metaDataMap);
+  }
+
+  public AddPrimaryKeyRequest() {
+  }
+
+  public AddPrimaryKeyRequest(
+    List<SQLPrimaryKey> primaryKeyCols)
+  {
+    this();
+    this.primaryKeyCols = primaryKeyCols;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AddPrimaryKeyRequest(AddPrimaryKeyRequest other) {
+    if (other.isSetPrimaryKeyCols()) {
+      List<SQLPrimaryKey> __this__primaryKeyCols = new ArrayList<SQLPrimaryKey>(other.primaryKeyCols.size());
+      for (SQLPrimaryKey other_element : other.primaryKeyCols) {
+        __this__primaryKeyCols.add(new SQLPrimaryKey(other_element));
+      }
+      this.primaryKeyCols = __this__primaryKeyCols;
+    }
+  }
+
+  public AddPrimaryKeyRequest deepCopy() {
+    return new AddPrimaryKeyRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.primaryKeyCols = null;
+  }
+
+  public int getPrimaryKeyColsSize() {
+    return (this.primaryKeyCols == null) ? 0 : this.primaryKeyCols.size();
+  }
+
+  public java.util.Iterator<SQLPrimaryKey> getPrimaryKeyColsIterator() {
+    return (this.primaryKeyCols == null) ? null : this.primaryKeyCols.iterator();
+  }
+
+  public void addToPrimaryKeyCols(SQLPrimaryKey elem) {
+    if (this.primaryKeyCols == null) {
+      this.primaryKeyCols = new ArrayList<SQLPrimaryKey>();
+    }
+    this.primaryKeyCols.add(elem);
+  }
+
+  public List<SQLPrimaryKey> getPrimaryKeyCols() {
+    return this.primaryKeyCols;
+  }
+
+  public void setPrimaryKeyCols(List<SQLPrimaryKey> primaryKeyCols) {
+    this.primaryKeyCols = primaryKeyCols;
+  }
+
+  public void unsetPrimaryKeyCols() {
+    this.primaryKeyCols = null;
+  }
+
+  /** Returns true if field primaryKeyCols is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrimaryKeyCols() {
+    return this.primaryKeyCols != null;
+  }
+
+  public void setPrimaryKeyColsIsSet(boolean value) {
+    if (!value) {
+      this.primaryKeyCols = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PRIMARY_KEY_COLS:
+      if (value == null) {
+        unsetPrimaryKeyCols();
+      } else {
+        setPrimaryKeyCols((List<SQLPrimaryKey>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PRIMARY_KEY_COLS:
+      return getPrimaryKeyCols();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PRIMARY_KEY_COLS:
+      return isSetPrimaryKeyCols();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AddPrimaryKeyRequest)
+      return this.equals((AddPrimaryKeyRequest)that);
+    return false;
+  }
+
+  public boolean equals(AddPrimaryKeyRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_primaryKeyCols = true && this.isSetPrimaryKeyCols();
+    boolean that_present_primaryKeyCols = true && that.isSetPrimaryKeyCols();
+    if (this_present_primaryKeyCols || that_present_primaryKeyCols) {
+      if (!(this_present_primaryKeyCols && that_present_primaryKeyCols))
+        return false;
+      if (!this.primaryKeyCols.equals(that.primaryKeyCols))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_primaryKeyCols = true && (isSetPrimaryKeyCols());
+    list.add(present_primaryKeyCols);
+    if (present_primaryKeyCols)
+      list.add(primaryKeyCols);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AddPrimaryKeyRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPrimaryKeyCols()).compareTo(other.isSetPrimaryKeyCols());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrimaryKeyCols()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.primaryKeyCols, other.primaryKeyCols);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AddPrimaryKeyRequest(");
+    boolean first = true;
+
+    sb.append("primaryKeyCols:");
+    if (this.primaryKeyCols == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.primaryKeyCols);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetPrimaryKeyCols()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'primaryKeyCols' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AddPrimaryKeyRequestStandardSchemeFactory implements SchemeFactory {
+    public AddPrimaryKeyRequestStandardScheme getScheme() {
+      return new AddPrimaryKeyRequestStandardScheme();
+    }
+  }
+
+  private static class AddPrimaryKeyRequestStandardScheme extends StandardScheme<AddPrimaryKeyRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AddPrimaryKeyRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PRIMARY_KEY_COLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list368 = iprot.readListBegin();
+                struct.primaryKeyCols = new ArrayList<SQLPrimaryKey>(_list368.size);
+                SQLPrimaryKey _elem369;
+                for (int _i370 = 0; _i370 < _list368.size; ++_i370)
+                {
+                  _elem369 = new SQLPrimaryKey();
+                  _elem369.read(iprot);
+                  struct.primaryKeyCols.add(_elem369);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPrimaryKeyColsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AddPrimaryKeyRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.primaryKeyCols != null) {
+        oprot.writeFieldBegin(PRIMARY_KEY_COLS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeyCols.size()));
+          for (SQLPrimaryKey _iter371 : struct.primaryKeyCols)
+          {
+            _iter371.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AddPrimaryKeyRequestTupleSchemeFactory implements SchemeFactory {
+    public AddPrimaryKeyRequestTupleScheme getScheme() {
+      return new AddPrimaryKeyRequestTupleScheme();
+    }
+  }
+
+  private static class AddPrimaryKeyRequestTupleScheme extends TupleScheme<AddPrimaryKeyRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.primaryKeyCols.size());
+        for (SQLPrimaryKey _iter372 : struct.primaryKeyCols)
+        {
+          _iter372.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AddPrimaryKeyRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list373 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.primaryKeyCols = new ArrayList<SQLPrimaryKey>(_list373.size);
+        SQLPrimaryKey _elem374;
+        for (int _i375 = 0; _i375 < _list373.size; ++_i375)
+        {
+          _elem374 = new SQLPrimaryKey();
+          _elem374.read(iprot);
+          struct.primaryKeyCols.add(_elem374);
+        }
+      }
+      struct.setPrimaryKeyColsIsSet(true);
+    }
+  }
+
+}
+
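
Because primaryKeyCols is declared REQUIRED in the metadata map above,
validate() rejects a struct on which the field was never assigned. A minimal
sketch of that behavior, assuming libthrift 0.9.3 (the class name ValidateDemo
is hypothetical):

import org.apache.hadoop.hive.metastore.api.AddPrimaryKeyRequest;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.thrift.protocol.TProtocolException;

public class ValidateDemo {
  public static void main(String[] args) throws Exception {
    AddPrimaryKeyRequest req = new AddPrimaryKeyRequest();
    try {
      req.validate(); // throws: required field 'primaryKeyCols' is unset
    } catch (TProtocolException expected) {
      System.out.println(expected.getMessage());
    }

    req.addToPrimaryKeyCols(new SQLPrimaryKey()); // lazily creates the backing list
    req.validate(); // passes: isSetPrimaryKeyCols() is now true
  }
}

Note that the standard scheme's write() enforces the same contract by calling
struct.validate() before emitting any fields.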

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java
new file mode 100644
index 0000000..bcb1e6b
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddUniqueConstraintRequest.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AddUniqueConstraintRequest implements org.apache.thrift.TBase<AddUniqueConstraintRequest, AddUniqueConstraintRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AddUniqueConstraintRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AddUniqueConstraintRequest");
+
+  private static final org.apache.thrift.protocol.TField UNIQUE_CONSTRAINT_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("uniqueConstraintCols", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AddUniqueConstraintRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AddUniqueConstraintRequestTupleSchemeFactory());
+  }
+
+  private List<SQLUniqueConstraint> uniqueConstraintCols; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    UNIQUE_CONSTRAINT_COLS((short)1, "uniqueConstraintCols");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // UNIQUE_CONSTRAINT_COLS
+          return UNIQUE_CONSTRAINT_COLS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.UNIQUE_CONSTRAINT_COLS, new org.apache.thrift.meta_data.FieldMetaData("uniqueConstraintCols", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLUniqueConstraint.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AddUniqueConstraintRequest.class, metaDataMap);
+  }
+
+  public AddUniqueConstraintRequest() {
+  }
+
+  public AddUniqueConstraintRequest(
+    List<SQLUniqueConstraint> uniqueConstraintCols)
+  {
+    this();
+    this.uniqueConstraintCols = uniqueConstraintCols;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AddUniqueConstraintRequest(AddUniqueConstraintRequest other) {
+    if (other.isSetUniqueConstraintCols()) {
+      List<SQLUniqueConstraint> __this__uniqueConstraintCols = new ArrayList<SQLUniqueConstraint>(other.uniqueConstraintCols.size());
+      for (SQLUniqueConstraint other_element : other.uniqueConstraintCols) {
+        __this__uniqueConstraintCols.add(new SQLUniqueConstraint(other_element));
+      }
+      this.uniqueConstraintCols = __this__uniqueConstraintCols;
+    }
+  }
+
+  public AddUniqueConstraintRequest deepCopy() {
+    return new AddUniqueConstraintRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.uniqueConstraintCols = null;
+  }
+
+  public int getUniqueConstraintColsSize() {
+    return (this.uniqueConstraintCols == null) ? 0 : this.uniqueConstraintCols.size();
+  }
+
+  public java.util.Iterator<SQLUniqueConstraint> getUniqueConstraintColsIterator() {
+    return (this.uniqueConstraintCols == null) ? null : this.uniqueConstraintCols.iterator();
+  }
+
+  public void addToUniqueConstraintCols(SQLUniqueConstraint elem) {
+    if (this.uniqueConstraintCols == null) {
+      this.uniqueConstraintCols = new ArrayList<SQLUniqueConstraint>();
+    }
+    this.uniqueConstraintCols.add(elem);
+  }
+
+  public List<SQLUniqueConstraint> getUniqueConstraintCols() {
+    return this.uniqueConstraintCols;
+  }
+
+  public void setUniqueConstraintCols(List<SQLUniqueConstraint> uniqueConstraintCols) {
+    this.uniqueConstraintCols = uniqueConstraintCols;
+  }
+
+  public void unsetUniqueConstraintCols() {
+    this.uniqueConstraintCols = null;
+  }
+
+  /** Returns true if field uniqueConstraintCols is set (has been assigned a value) and false otherwise */
+  public boolean isSetUniqueConstraintCols() {
+    return this.uniqueConstraintCols != null;
+  }
+
+  public void setUniqueConstraintColsIsSet(boolean value) {
+    if (!value) {
+      this.uniqueConstraintCols = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case UNIQUE_CONSTRAINT_COLS:
+      if (value == null) {
+        unsetUniqueConstraintCols();
+      } else {
+        setUniqueConstraintCols((List<SQLUniqueConstraint>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case UNIQUE_CONSTRAINT_COLS:
+      return getUniqueConstraintCols();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case UNIQUE_CONSTRAINT_COLS:
+      return isSetUniqueConstraintCols();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AddUniqueConstraintRequest)
+      return this.equals((AddUniqueConstraintRequest)that);
+    return false;
+  }
+
+  public boolean equals(AddUniqueConstraintRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_uniqueConstraintCols = true && this.isSetUniqueConstraintCols();
+    boolean that_present_uniqueConstraintCols = true && that.isSetUniqueConstraintCols();
+    if (this_present_uniqueConstraintCols || that_present_uniqueConstraintCols) {
+      if (!(this_present_uniqueConstraintCols && that_present_uniqueConstraintCols))
+        return false;
+      if (!this.uniqueConstraintCols.equals(that.uniqueConstraintCols))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_uniqueConstraintCols = true && (isSetUniqueConstraintCols());
+    list.add(present_uniqueConstraintCols);
+    if (present_uniqueConstraintCols)
+      list.add(uniqueConstraintCols);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AddUniqueConstraintRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetUniqueConstraintCols()).compareTo(other.isSetUniqueConstraintCols());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetUniqueConstraintCols()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.uniqueConstraintCols, other.uniqueConstraintCols);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AddUniqueConstraintRequest(");
+    boolean first = true;
+
+    sb.append("uniqueConstraintCols:");
+    if (this.uniqueConstraintCols == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.uniqueConstraintCols);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetUniqueConstraintCols()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'uniqueConstraintCols' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AddUniqueConstraintRequestStandardSchemeFactory implements SchemeFactory {
+    public AddUniqueConstraintRequestStandardScheme getScheme() {
+      return new AddUniqueConstraintRequestStandardScheme();
+    }
+  }
+
+  private static class AddUniqueConstraintRequestStandardScheme extends StandardScheme<AddUniqueConstraintRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AddUniqueConstraintRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // UNIQUE_CONSTRAINT_COLS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list384 = iprot.readListBegin();
+                struct.uniqueConstraintCols = new ArrayList<SQLUniqueConstraint>(_list384.size);
+                SQLUniqueConstraint _elem385;
+                for (int _i386 = 0; _i386 < _list384.size; ++_i386)
+                {
+                  _elem385 = new SQLUniqueConstraint();
+                  _elem385.read(iprot);
+                  struct.uniqueConstraintCols.add(_elem385);
+                }
+                iprot.readListEnd();
+              }
+              struct.setUniqueConstraintColsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AddUniqueConstraintRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.uniqueConstraintCols != null) {
+        oprot.writeFieldBegin(UNIQUE_CONSTRAINT_COLS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.uniqueConstraintCols.size()));
+          for (SQLUniqueConstraint _iter387 : struct.uniqueConstraintCols)
+          {
+            _iter387.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AddUniqueConstraintRequestTupleSchemeFactory implements SchemeFactory {
+    public AddUniqueConstraintRequestTupleScheme getScheme() {
+      return new AddUniqueConstraintRequestTupleScheme();
+    }
+  }
+
+  private static class AddUniqueConstraintRequestTupleScheme extends TupleScheme<AddUniqueConstraintRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.uniqueConstraintCols.size());
+        for (SQLUniqueConstraint _iter388 : struct.uniqueConstraintCols)
+        {
+          _iter388.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AddUniqueConstraintRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list389 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.uniqueConstraintCols = new ArrayList<SQLUniqueConstraint>(_list389.size);
+        SQLUniqueConstraint _elem390;
+        for (int _i391 = 0; _i391 < _list389.size; ++_i391)
+        {
+          _elem390 = new SQLUniqueConstraint();
+          _elem390.read(iprot);
+          struct.uniqueConstraintCols.add(_elem390);
+        }
+      }
+      struct.setUniqueConstraintColsIsSet(true);
+    }
+  }
+
+}
+
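
The copy constructor above clones both the list and each SQLUniqueConstraint
element, so deepCopy() yields a struct that is fully independent of the
original. A short sketch (the class name DeepCopyDemo is hypothetical):

import java.util.ArrayList;
import org.apache.hadoop.hive.metastore.api.AddUniqueConstraintRequest;
import org.apache.hadoop.hive.metastore.api.SQLUniqueConstraint;

public class DeepCopyDemo {
  public static void main(String[] args) {
    AddUniqueConstraintRequest original =
        new AddUniqueConstraintRequest(new ArrayList<SQLUniqueConstraint>());
    original.addToUniqueConstraintCols(new SQLUniqueConstraint());

    AddUniqueConstraintRequest copy = original.deepCopy();
    copy.addToUniqueConstraintCols(new SQLUniqueConstraint());

    // Mutating the copy leaves the original untouched.
    System.out.println(original.getUniqueConstraintColsSize()); // 1
    System.out.println(copy.getUniqueConstraintColsSize());     // 2
  }
}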


http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
new file mode 100644
index 0000000..fff212d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java
@@ -0,0 +1,542 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AggrStats implements org.apache.thrift.TBase<AggrStats, AggrStats._Fields>, java.io.Serializable, Cloneable, Comparable<AggrStats> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AggrStats");
+
+  private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField PARTS_FOUND_FIELD_DESC = new org.apache.thrift.protocol.TField("partsFound", org.apache.thrift.protocol.TType.I64, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AggrStatsStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AggrStatsTupleSchemeFactory());
+  }
+
+  private List<ColumnStatisticsObj> colStats; // required
+  private long partsFound; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    COL_STATS((short)1, "colStats"),
+    PARTS_FOUND((short)2, "partsFound");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // COL_STATS
+          return COL_STATS;
+        case 2: // PARTS_FOUND
+          return PARTS_FOUND;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __PARTSFOUND_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.COL_STATS, new org.apache.thrift.meta_data.FieldMetaData("colStats", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatisticsObj.class))));
+    tmpMap.put(_Fields.PARTS_FOUND, new org.apache.thrift.meta_data.FieldMetaData("partsFound", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AggrStats.class, metaDataMap);
+  }
+
+  public AggrStats() {
+  }
+
+  public AggrStats(
+    List<ColumnStatisticsObj> colStats,
+    long partsFound)
+  {
+    this();
+    this.colStats = colStats;
+    this.partsFound = partsFound;
+    setPartsFoundIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AggrStats(AggrStats other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetColStats()) {
+      List<ColumnStatisticsObj> __this__colStats = new ArrayList<ColumnStatisticsObj>(other.colStats.size());
+      for (ColumnStatisticsObj other_element : other.colStats) {
+        __this__colStats.add(new ColumnStatisticsObj(other_element));
+      }
+      this.colStats = __this__colStats;
+    }
+    this.partsFound = other.partsFound;
+  }
+
+  public AggrStats deepCopy() {
+    return new AggrStats(this);
+  }
+
+  @Override
+  public void clear() {
+    this.colStats = null;
+    setPartsFoundIsSet(false);
+    this.partsFound = 0;
+  }
+
+  public int getColStatsSize() {
+    return (this.colStats == null) ? 0 : this.colStats.size();
+  }
+
+  public java.util.Iterator<ColumnStatisticsObj> getColStatsIterator() {
+    return (this.colStats == null) ? null : this.colStats.iterator();
+  }
+
+  public void addToColStats(ColumnStatisticsObj elem) {
+    if (this.colStats == null) {
+      this.colStats = new ArrayList<ColumnStatisticsObj>();
+    }
+    this.colStats.add(elem);
+  }
+
+  public List<ColumnStatisticsObj> getColStats() {
+    return this.colStats;
+  }
+
+  public void setColStats(List<ColumnStatisticsObj> colStats) {
+    this.colStats = colStats;
+  }
+
+  public void unsetColStats() {
+    this.colStats = null;
+  }
+
+  /** Returns true if field colStats is set (has been assigned a value) and false otherwise */
+  public boolean isSetColStats() {
+    return this.colStats != null;
+  }
+
+  public void setColStatsIsSet(boolean value) {
+    if (!value) {
+      this.colStats = null;
+    }
+  }
+
+  public long getPartsFound() {
+    return this.partsFound;
+  }
+
+  public void setPartsFound(long partsFound) {
+    this.partsFound = partsFound;
+    setPartsFoundIsSet(true);
+  }
+
+  public void unsetPartsFound() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __PARTSFOUND_ISSET_ID);
+  }
+
+  /** Returns true if field partsFound is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartsFound() {
+    return EncodingUtils.testBit(__isset_bitfield, __PARTSFOUND_ISSET_ID);
+  }
+
+  public void setPartsFoundIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __PARTSFOUND_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case COL_STATS:
+      if (value == null) {
+        unsetColStats();
+      } else {
+        setColStats((List<ColumnStatisticsObj>)value);
+      }
+      break;
+
+    case PARTS_FOUND:
+      if (value == null) {
+        unsetPartsFound();
+      } else {
+        setPartsFound((Long)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case COL_STATS:
+      return getColStats();
+
+    case PARTS_FOUND:
+      return getPartsFound();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case COL_STATS:
+      return isSetColStats();
+    case PARTS_FOUND:
+      return isSetPartsFound();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AggrStats)
+      return this.equals((AggrStats)that);
+    return false;
+  }
+
+  public boolean equals(AggrStats that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_colStats = true && this.isSetColStats();
+    boolean that_present_colStats = true && that.isSetColStats();
+    if (this_present_colStats || that_present_colStats) {
+      if (!(this_present_colStats && that_present_colStats))
+        return false;
+      if (!this.colStats.equals(that.colStats))
+        return false;
+    }
+
+    boolean this_present_partsFound = true;
+    boolean that_present_partsFound = true;
+    if (this_present_partsFound || that_present_partsFound) {
+      if (!(this_present_partsFound && that_present_partsFound))
+        return false;
+      if (this.partsFound != that.partsFound)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_colStats = true && (isSetColStats());
+    list.add(present_colStats);
+    if (present_colStats)
+      list.add(colStats);
+
+    boolean present_partsFound = true;
+    list.add(present_partsFound);
+    if (present_partsFound)
+      list.add(partsFound);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AggrStats other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetColStats()).compareTo(other.isSetColStats());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetColStats()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colStats, other.colStats);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPartsFound()).compareTo(other.isSetPartsFound());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartsFound()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partsFound, other.partsFound);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AggrStats(");
+    boolean first = true;
+
+    sb.append("colStats:");
+    if (this.colStats == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.colStats);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("partsFound:");
+    sb.append(this.partsFound);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetColStats()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'colStats' is unset! Struct:" + toString());
+    }
+
+    if (!isSetPartsFound()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partsFound' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AggrStatsStandardSchemeFactory implements SchemeFactory {
+    public AggrStatsStandardScheme getScheme() {
+      return new AggrStatsStandardScheme();
+    }
+  }
+
+  private static class AggrStatsStandardScheme extends StandardScheme<AggrStats> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AggrStats struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // COL_STATS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list276 = iprot.readListBegin();
+                struct.colStats = new ArrayList<ColumnStatisticsObj>(_list276.size);
+                ColumnStatisticsObj _elem277;
+                for (int _i278 = 0; _i278 < _list276.size; ++_i278)
+                {
+                  _elem277 = new ColumnStatisticsObj();
+                  _elem277.read(iprot);
+                  struct.colStats.add(_elem277);
+                }
+                iprot.readListEnd();
+              }
+              struct.setColStatsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // PARTS_FOUND
+            if (schemeField.type == org.apache.thrift.protocol.TType.I64) {
+              struct.partsFound = iprot.readI64();
+              struct.setPartsFoundIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AggrStats struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.colStats != null) {
+        oprot.writeFieldBegin(COL_STATS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size()));
+          for (ColumnStatisticsObj _iter279 : struct.colStats)
+          {
+            _iter279.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(PARTS_FOUND_FIELD_DESC);
+      oprot.writeI64(struct.partsFound);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AggrStatsTupleSchemeFactory implements SchemeFactory {
+    public AggrStatsTupleScheme getScheme() {
+      return new AggrStatsTupleScheme();
+    }
+  }
+
+  private static class AggrStatsTupleScheme extends TupleScheme<AggrStats> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.colStats.size());
+        for (ColumnStatisticsObj _iter280 : struct.colStats)
+        {
+          _iter280.write(oprot);
+        }
+      }
+      oprot.writeI64(struct.partsFound);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.colStats = new ArrayList<ColumnStatisticsObj>(_list281.size);
+        ColumnStatisticsObj _elem282;
+        for (int _i283 = 0; _i283 < _list281.size; ++_i283)
+        {
+          _elem282 = new ColumnStatisticsObj();
+          _elem282.read(iprot);
+          struct.colStats.add(_elem282);
+        }
+      }
+      struct.setColStatsIsSet(true);
+      struct.partsFound = iprot.readI64();
+      struct.setPartsFoundIsSet(true);
+    }
+  }
+
+}
+
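
Unlike the object-typed colStats field, the primitive long partsFound cannot
signal "unset" with null, so the generated code tracks its presence as a bit in
__isset_bitfield through EncodingUtils, as seen in unsetPartsFound() and
isSetPartsFound() above. A small sketch of the difference (the class name
IssetDemo is hypothetical):

import java.util.ArrayList;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

public class IssetDemo {
  public static void main(String[] args) {
    AggrStats stats = new AggrStats();

    // Primitive field: presence is a bit in __isset_bitfield.
    System.out.println(stats.isSetPartsFound()); // false
    stats.setPartsFound(0L);                     // value unchanged, but now "set"
    System.out.println(stats.isSetPartsFound()); // true

    // Object field: presence is simply a null check.
    stats.setColStats(new ArrayList<ColumnStatisticsObj>());
    System.out.println(stats.isSetColStats());   // true
  }
}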

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
new file mode 100644
index 0000000..a0b47a9
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsRequest.java
@@ -0,0 +1,915 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AllocateTableWriteIdsRequest implements org.apache.thrift.TBase<AllocateTableWriteIdsRequest, AllocateTableWriteIdsRequest._Fields>, java.io.Serializable, Cloneable, Comparable<AllocateTableWriteIdsRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AllocateTableWriteIdsRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField TXN_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txnIds", org.apache.thrift.protocol.TType.LIST, (short)3);
+  private static final org.apache.thrift.protocol.TField REPL_POLICY_FIELD_DESC = new org.apache.thrift.protocol.TField("replPolicy", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("srcTxnToWriteIdList", org.apache.thrift.protocol.TType.LIST, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AllocateTableWriteIdsRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AllocateTableWriteIdsRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tableName; // required
+  private List<Long> txnIds; // optional
+  private String replPolicy; // optional
+  private List<TxnToWriteId> srcTxnToWriteIdList; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TABLE_NAME((short)2, "tableName"),
+    TXN_IDS((short)3, "txnIds"),
+    REPL_POLICY((short)4, "replPolicy"),
+    SRC_TXN_TO_WRITE_ID_LIST((short)5, "srcTxnToWriteIdList");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TABLE_NAME
+          return TABLE_NAME;
+        case 3: // TXN_IDS
+          return TXN_IDS;
+        case 4: // REPL_POLICY
+          return REPL_POLICY;
+        case 5: // SRC_TXN_TO_WRITE_ID_LIST
+          return SRC_TXN_TO_WRITE_ID_LIST;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final _Fields optionals[] = {_Fields.TXN_IDS,_Fields.REPL_POLICY,_Fields.SRC_TXN_TO_WRITE_ID_LIST};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TXN_IDS, new org.apache.thrift.meta_data.FieldMetaData("txnIds", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I64))));
+    tmpMap.put(_Fields.REPL_POLICY, new org.apache.thrift.meta_data.FieldMetaData("replPolicy", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.SRC_TXN_TO_WRITE_ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("srcTxnToWriteIdList", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT            , "TxnToWriteId"))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AllocateTableWriteIdsRequest.class, metaDataMap);
+  }
+
+  public AllocateTableWriteIdsRequest() {
+  }
+
+  public AllocateTableWriteIdsRequest(
+    String dbName,
+    String tableName)
+  {
+    this();
+    this.dbName = dbName;
+    this.tableName = tableName;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AllocateTableWriteIdsRequest(AllocateTableWriteIdsRequest other) {
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTableName()) {
+      this.tableName = other.tableName;
+    }
+    if (other.isSetTxnIds()) {
+      List<Long> __this__txnIds = new ArrayList<Long>(other.txnIds);
+      this.txnIds = __this__txnIds;
+    }
+    if (other.isSetReplPolicy()) {
+      this.replPolicy = other.replPolicy;
+    }
+    if (other.isSetSrcTxnToWriteIdList()) {
+      List<TxnToWriteId> __this__srcTxnToWriteIdList = new ArrayList<TxnToWriteId>(other.srcTxnToWriteIdList.size());
+      for (TxnToWriteId other_element : other.srcTxnToWriteIdList) {
+        __this__srcTxnToWriteIdList.add(other_element);
+      }
+      this.srcTxnToWriteIdList = __this__srcTxnToWriteIdList;
+    }
+  }
+
+  public AllocateTableWriteIdsRequest deepCopy() {
+    return new AllocateTableWriteIdsRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tableName = null;
+    this.txnIds = null;
+    this.replPolicy = null;
+    this.srcTxnToWriteIdList = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTableName() {
+    return this.tableName;
+  }
+
+  public void setTableName(String tableName) {
+    this.tableName = tableName;
+  }
+
+  public void unsetTableName() {
+    this.tableName = null;
+  }
+
+  /** Returns true if field tableName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTableName() {
+    return this.tableName != null;
+  }
+
+  public void setTableNameIsSet(boolean value) {
+    if (!value) {
+      this.tableName = null;
+    }
+  }
+
+  public int getTxnIdsSize() {
+    return (this.txnIds == null) ? 0 : this.txnIds.size();
+  }
+
+  public java.util.Iterator<Long> getTxnIdsIterator() {
+    return (this.txnIds == null) ? null : this.txnIds.iterator();
+  }
+
+  public void addToTxnIds(long elem) {
+    if (this.txnIds == null) {
+      this.txnIds = new ArrayList<Long>();
+    }
+    this.txnIds.add(elem);
+  }
+
+  public List<Long> getTxnIds() {
+    return this.txnIds;
+  }
+
+  public void setTxnIds(List<Long> txnIds) {
+    this.txnIds = txnIds;
+  }
+
+  public void unsetTxnIds() {
+    this.txnIds = null;
+  }
+
+  /** Returns true if field txnIds is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnIds() {
+    return this.txnIds != null;
+  }
+
+  public void setTxnIdsIsSet(boolean value) {
+    if (!value) {
+      this.txnIds = null;
+    }
+  }
+
+  public String getReplPolicy() {
+    return this.replPolicy;
+  }
+
+  public void setReplPolicy(String replPolicy) {
+    this.replPolicy = replPolicy;
+  }
+
+  public void unsetReplPolicy() {
+    this.replPolicy = null;
+  }
+
+  /** Returns true if field replPolicy is set (has been assigned a value) and false otherwise */
+  public boolean isSetReplPolicy() {
+    return this.replPolicy != null;
+  }
+
+  public void setReplPolicyIsSet(boolean value) {
+    if (!value) {
+      this.replPolicy = null;
+    }
+  }
+
+  public int getSrcTxnToWriteIdListSize() {
+    return (this.srcTxnToWriteIdList == null) ? 0 : this.srcTxnToWriteIdList.size();
+  }
+
+  public java.util.Iterator<TxnToWriteId> getSrcTxnToWriteIdListIterator() {
+    return (this.srcTxnToWriteIdList == null) ? null : this.srcTxnToWriteIdList.iterator();
+  }
+
+  public void addToSrcTxnToWriteIdList(TxnToWriteId elem) {
+    if (this.srcTxnToWriteIdList == null) {
+      this.srcTxnToWriteIdList = new ArrayList<TxnToWriteId>();
+    }
+    this.srcTxnToWriteIdList.add(elem);
+  }
+
+  public List<TxnToWriteId> getSrcTxnToWriteIdList() {
+    return this.srcTxnToWriteIdList;
+  }
+
+  public void setSrcTxnToWriteIdList(List<TxnToWriteId> srcTxnToWriteIdList) {
+    this.srcTxnToWriteIdList = srcTxnToWriteIdList;
+  }
+
+  public void unsetSrcTxnToWriteIdList() {
+    this.srcTxnToWriteIdList = null;
+  }
+
+  /** Returns true if field srcTxnToWriteIdList is set (has been assigned a value) and false otherwise */
+  public boolean isSetSrcTxnToWriteIdList() {
+    return this.srcTxnToWriteIdList != null;
+  }
+
+  public void setSrcTxnToWriteIdListIsSet(boolean value) {
+    if (!value) {
+      this.srcTxnToWriteIdList = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TABLE_NAME:
+      if (value == null) {
+        unsetTableName();
+      } else {
+        setTableName((String)value);
+      }
+      break;
+
+    case TXN_IDS:
+      if (value == null) {
+        unsetTxnIds();
+      } else {
+        setTxnIds((List<Long>)value);
+      }
+      break;
+
+    case REPL_POLICY:
+      if (value == null) {
+        unsetReplPolicy();
+      } else {
+        setReplPolicy((String)value);
+      }
+      break;
+
+    case SRC_TXN_TO_WRITE_ID_LIST:
+      if (value == null) {
+        unsetSrcTxnToWriteIdList();
+      } else {
+        setSrcTxnToWriteIdList((List<TxnToWriteId>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TABLE_NAME:
+      return getTableName();
+
+    case TXN_IDS:
+      return getTxnIds();
+
+    case REPL_POLICY:
+      return getReplPolicy();
+
+    case SRC_TXN_TO_WRITE_ID_LIST:
+      return getSrcTxnToWriteIdList();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TABLE_NAME:
+      return isSetTableName();
+    case TXN_IDS:
+      return isSetTxnIds();
+    case REPL_POLICY:
+      return isSetReplPolicy();
+    case SRC_TXN_TO_WRITE_ID_LIST:
+      return isSetSrcTxnToWriteIdList();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AllocateTableWriteIdsRequest)
+      return this.equals((AllocateTableWriteIdsRequest)that);
+    return false;
+  }
+
+  public boolean equals(AllocateTableWriteIdsRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tableName = true && this.isSetTableName();
+    boolean that_present_tableName = true && that.isSetTableName();
+    if (this_present_tableName || that_present_tableName) {
+      if (!(this_present_tableName && that_present_tableName))
+        return false;
+      if (!this.tableName.equals(that.tableName))
+        return false;
+    }
+
+    boolean this_present_txnIds = true && this.isSetTxnIds();
+    boolean that_present_txnIds = true && that.isSetTxnIds();
+    if (this_present_txnIds || that_present_txnIds) {
+      if (!(this_present_txnIds && that_present_txnIds))
+        return false;
+      if (!this.txnIds.equals(that.txnIds))
+        return false;
+    }
+
+    boolean this_present_replPolicy = true && this.isSetReplPolicy();
+    boolean that_present_replPolicy = true && that.isSetReplPolicy();
+    if (this_present_replPolicy || that_present_replPolicy) {
+      if (!(this_present_replPolicy && that_present_replPolicy))
+        return false;
+      if (!this.replPolicy.equals(that.replPolicy))
+        return false;
+    }
+
+    boolean this_present_srcTxnToWriteIdList = true && this.isSetSrcTxnToWriteIdList();
+    boolean that_present_srcTxnToWriteIdList = true && that.isSetSrcTxnToWriteIdList();
+    if (this_present_srcTxnToWriteIdList || that_present_srcTxnToWriteIdList) {
+      if (!(this_present_srcTxnToWriteIdList && that_present_srcTxnToWriteIdList))
+        return false;
+      if (!this.srcTxnToWriteIdList.equals(that.srcTxnToWriteIdList))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tableName = true && (isSetTableName());
+    list.add(present_tableName);
+    if (present_tableName)
+      list.add(tableName);
+
+    boolean present_txnIds = true && (isSetTxnIds());
+    list.add(present_txnIds);
+    if (present_txnIds)
+      list.add(txnIds);
+
+    boolean present_replPolicy = true && (isSetReplPolicy());
+    list.add(present_replPolicy);
+    if (present_replPolicy)
+      list.add(replPolicy);
+
+    boolean present_srcTxnToWriteIdList = true && (isSetSrcTxnToWriteIdList());
+    list.add(present_srcTxnToWriteIdList);
+    if (present_srcTxnToWriteIdList)
+      list.add(srcTxnToWriteIdList);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AllocateTableWriteIdsRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTableName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTxnIds()).compareTo(other.isSetTxnIds());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnIds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnIds, other.txnIds);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetReplPolicy()).compareTo(other.isSetReplPolicy());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetReplPolicy()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.replPolicy, other.replPolicy);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetSrcTxnToWriteIdList()).compareTo(other.isSetSrcTxnToWriteIdList());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetSrcTxnToWriteIdList()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.srcTxnToWriteIdList, other.srcTxnToWriteIdList);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AllocateTableWriteIdsRequest(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tableName:");
+    if (this.tableName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tableName);
+    }
+    first = false;
+    if (isSetTxnIds()) {
+      if (!first) sb.append(", ");
+      sb.append("txnIds:");
+      if (this.txnIds == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.txnIds);
+      }
+      first = false;
+    }
+    if (isSetReplPolicy()) {
+      if (!first) sb.append(", ");
+      sb.append("replPolicy:");
+      if (this.replPolicy == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.replPolicy);
+      }
+      first = false;
+    }
+    if (isSetSrcTxnToWriteIdList()) {
+      if (!first) sb.append(", ");
+      sb.append("srcTxnToWriteIdList:");
+      if (this.srcTxnToWriteIdList == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.srcTxnToWriteIdList);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTableName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tableName' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AllocateTableWriteIdsRequestStandardSchemeFactory implements SchemeFactory {
+    public AllocateTableWriteIdsRequestStandardScheme getScheme() {
+      return new AllocateTableWriteIdsRequestStandardScheme();
+    }
+  }
+
+  private static class AllocateTableWriteIdsRequestStandardScheme extends StandardScheme<AllocateTableWriteIdsRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteIdsRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TABLE_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tableName = iprot.readString();
+              struct.setTableNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // TXN_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list634 = iprot.readListBegin();
+                struct.txnIds = new ArrayList<Long>(_list634.size);
+                long _elem635;
+                for (int _i636 = 0; _i636 < _list634.size; ++_i636)
+                {
+                  _elem635 = iprot.readI64();
+                  struct.txnIds.add(_elem635);
+                }
+                iprot.readListEnd();
+              }
+              struct.setTxnIdsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // REPL_POLICY
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.replPolicy = iprot.readString();
+              struct.setReplPolicyIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // SRC_TXN_TO_WRITE_ID_LIST
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list637 = iprot.readListBegin();
+                struct.srcTxnToWriteIdList = new ArrayList<TxnToWriteId>(_list637.size);
+                TxnToWriteId _elem638;
+                for (int _i639 = 0; _i639 < _list637.size; ++_i639)
+                {
+                  _elem638 = new TxnToWriteId();
+                  _elem638.read(iprot);
+                  struct.srcTxnToWriteIdList.add(_elem638);
+                }
+                iprot.readListEnd();
+              }
+              struct.setSrcTxnToWriteIdListIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWriteIdsRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tableName != null) {
+        oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.tableName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.txnIds != null) {
+        if (struct.isSetTxnIds()) {
+          oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txnIds.size()));
+            for (long _iter640 : struct.txnIds)
+            {
+              oprot.writeI64(_iter640);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.replPolicy != null) {
+        if (struct.isSetReplPolicy()) {
+          oprot.writeFieldBegin(REPL_POLICY_FIELD_DESC);
+          oprot.writeString(struct.replPolicy);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.srcTxnToWriteIdList != null) {
+        if (struct.isSetSrcTxnToWriteIdList()) {
+          oprot.writeFieldBegin(SRC_TXN_TO_WRITE_ID_LIST_FIELD_DESC);
+          {
+            oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.srcTxnToWriteIdList.size()));
+            for (TxnToWriteId _iter641 : struct.srcTxnToWriteIdList)
+            {
+              _iter641.write(oprot);
+            }
+            oprot.writeListEnd();
+          }
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AllocateTableWriteIdsRequestTupleSchemeFactory implements SchemeFactory {
+    public AllocateTableWriteIdsRequestTupleScheme getScheme() {
+      return new AllocateTableWriteIdsRequestTupleScheme();
+    }
+  }
+
+  private static class AllocateTableWriteIdsRequestTupleScheme extends TupleScheme<AllocateTableWriteIdsRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tableName);
+      BitSet optionals = new BitSet();
+      if (struct.isSetTxnIds()) {
+        optionals.set(0);
+      }
+      if (struct.isSetReplPolicy()) {
+        optionals.set(1);
+      }
+      if (struct.isSetSrcTxnToWriteIdList()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetTxnIds()) {
+        {
+          oprot.writeI32(struct.txnIds.size());
+          for (long _iter642 : struct.txnIds)
+          {
+            oprot.writeI64(_iter642);
+          }
+        }
+      }
+      if (struct.isSetReplPolicy()) {
+        oprot.writeString(struct.replPolicy);
+      }
+      if (struct.isSetSrcTxnToWriteIdList()) {
+        {
+          oprot.writeI32(struct.srcTxnToWriteIdList.size());
+          for (TxnToWriteId _iter643 : struct.srcTxnToWriteIdList)
+          {
+            _iter643.write(oprot);
+          }
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tableName = iprot.readString();
+      struct.setTableNameIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list644 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+          struct.txnIds = new ArrayList<Long>(_list644.size);
+          long _elem645;
+          for (int _i646 = 0; _i646 < _list644.size; ++_i646)
+          {
+            _elem645 = iprot.readI64();
+            struct.txnIds.add(_elem645);
+          }
+        }
+        struct.setTxnIdsIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.replPolicy = iprot.readString();
+        struct.setReplPolicyIsSet(true);
+      }
+      if (incoming.get(2)) {
+        {
+          org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.srcTxnToWriteIdList = new ArrayList<TxnToWriteId>(_list647.size);
+          TxnToWriteId _elem648;
+          for (int _i649 = 0; _i649 < _list647.size; ++_i649)
+          {
+            _elem648 = new TxnToWriteId();
+            _elem648.read(iprot);
+            struct.srcTxnToWriteIdList.add(_elem648);
+          }
+        }
+        struct.setSrcTxnToWriteIdListIsSet(true);
+      }
+    }
+  }
+
+}
+
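[Editorial note: a minimal round-trip sketch for the struct above, assuming the stock libthrift 0.9.3 TSerializer/TDeserializer helpers; the database and table names are illustrative placeholders, not values from this commit:

    import org.apache.thrift.TSerializer;
    import org.apache.thrift.TDeserializer;
    import org.apache.thrift.protocol.TCompactProtocol;

    // Only dbName and tableName are required; txnIds is one of the optional fields.
    AllocateTableWriteIdsRequest req = new AllocateTableWriteIdsRequest("default", "sales");
    req.addToTxnIds(101L);
    req.addToTxnIds(102L);

    TSerializer ser = new TSerializer(new TCompactProtocol.Factory());
    byte[] bytes = ser.serialize(req);   // StandardScheme path; write() first calls validate()
    AllocateTableWriteIdsRequest copy = new AllocateTableWriteIdsRequest();
    new TDeserializer(new TCompactProtocol.Factory()).deserialize(copy, bytes);
    assert req.equals(copy);             // equality compares only the fields that are set

Note that validate() checks only the two required fields; unset optional fields are simply omitted on the wire.]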

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
new file mode 100644
index 0000000..13df26d
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AllocateTableWriteIdsResponse.java
@@ -0,0 +1,443 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AllocateTableWriteIdsResponse implements org.apache.thrift.TBase<AllocateTableWriteIdsResponse, AllocateTableWriteIdsResponse._Fields>, java.io.Serializable, Cloneable, Comparable<AllocateTableWriteIdsResponse> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AllocateTableWriteIdsResponse");
+
+  private static final org.apache.thrift.protocol.TField TXN_TO_WRITE_IDS_FIELD_DESC = new org.apache.thrift.protocol.TField("txnToWriteIds", org.apache.thrift.protocol.TType.LIST, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AllocateTableWriteIdsResponseStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AllocateTableWriteIdsResponseTupleSchemeFactory());
+  }
+
+  private List<TxnToWriteId> txnToWriteIds; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TXN_TO_WRITE_IDS((short)1, "txnToWriteIds");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TXN_TO_WRITE_IDS
+          return TXN_TO_WRITE_IDS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.TXN_TO_WRITE_IDS, new org.apache.thrift.meta_data.FieldMetaData("txnToWriteIds", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TxnToWriteId.class))));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AllocateTableWriteIdsResponse.class, metaDataMap);
+  }
+
+  public AllocateTableWriteIdsResponse() {
+  }
+
+  public AllocateTableWriteIdsResponse(
+    List<TxnToWriteId> txnToWriteIds)
+  {
+    this();
+    this.txnToWriteIds = txnToWriteIds;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AllocateTableWriteIdsResponse(AllocateTableWriteIdsResponse other) {
+    if (other.isSetTxnToWriteIds()) {
+      List<TxnToWriteId> __this__txnToWriteIds = new ArrayList<TxnToWriteId>(other.txnToWriteIds.size());
+      for (TxnToWriteId other_element : other.txnToWriteIds) {
+        __this__txnToWriteIds.add(new TxnToWriteId(other_element));
+      }
+      this.txnToWriteIds = __this__txnToWriteIds;
+    }
+  }
+
+  public AllocateTableWriteIdsResponse deepCopy() {
+    return new AllocateTableWriteIdsResponse(this);
+  }
+
+  @Override
+  public void clear() {
+    this.txnToWriteIds = null;
+  }
+
+  public int getTxnToWriteIdsSize() {
+    return (this.txnToWriteIds == null) ? 0 : this.txnToWriteIds.size();
+  }
+
+  public java.util.Iterator<TxnToWriteId> getTxnToWriteIdsIterator() {
+    return (this.txnToWriteIds == null) ? null : this.txnToWriteIds.iterator();
+  }
+
+  public void addToTxnToWriteIds(TxnToWriteId elem) {
+    if (this.txnToWriteIds == null) {
+      this.txnToWriteIds = new ArrayList<TxnToWriteId>();
+    }
+    this.txnToWriteIds.add(elem);
+  }
+
+  public List<TxnToWriteId> getTxnToWriteIds() {
+    return this.txnToWriteIds;
+  }
+
+  public void setTxnToWriteIds(List<TxnToWriteId> txnToWriteIds) {
+    this.txnToWriteIds = txnToWriteIds;
+  }
+
+  public void unsetTxnToWriteIds() {
+    this.txnToWriteIds = null;
+  }
+
+  /** Returns true if field txnToWriteIds is set (has been assigned a value) and false otherwise */
+  public boolean isSetTxnToWriteIds() {
+    return this.txnToWriteIds != null;
+  }
+
+  public void setTxnToWriteIdsIsSet(boolean value) {
+    if (!value) {
+      this.txnToWriteIds = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case TXN_TO_WRITE_IDS:
+      if (value == null) {
+        unsetTxnToWriteIds();
+      } else {
+        setTxnToWriteIds((List<TxnToWriteId>)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case TXN_TO_WRITE_IDS:
+      return getTxnToWriteIds();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case TXN_TO_WRITE_IDS:
+      return isSetTxnToWriteIds();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AllocateTableWriteIdsResponse)
+      return this.equals((AllocateTableWriteIdsResponse)that);
+    return false;
+  }
+
+  public boolean equals(AllocateTableWriteIdsResponse that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_txnToWriteIds = true && this.isSetTxnToWriteIds();
+    boolean that_present_txnToWriteIds = true && that.isSetTxnToWriteIds();
+    if (this_present_txnToWriteIds || that_present_txnToWriteIds) {
+      if (!(this_present_txnToWriteIds && that_present_txnToWriteIds))
+        return false;
+      if (!this.txnToWriteIds.equals(that.txnToWriteIds))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_txnToWriteIds = true && (isSetTxnToWriteIds());
+    list.add(present_txnToWriteIds);
+    if (present_txnToWriteIds)
+      list.add(txnToWriteIds);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AllocateTableWriteIdsResponse other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetTxnToWriteIds()).compareTo(other.isSetTxnToWriteIds());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTxnToWriteIds()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.txnToWriteIds, other.txnToWriteIds);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AllocateTableWriteIdsResponse(");
+    boolean first = true;
+
+    sb.append("txnToWriteIds:");
+    if (this.txnToWriteIds == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.txnToWriteIds);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetTxnToWriteIds()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'txnToWriteIds' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AllocateTableWriteIdsResponseStandardSchemeFactory implements SchemeFactory {
+    public AllocateTableWriteIdsResponseStandardScheme getScheme() {
+      return new AllocateTableWriteIdsResponseStandardScheme();
+    }
+  }
+
+  private static class AllocateTableWriteIdsResponseStandardScheme extends StandardScheme<AllocateTableWriteIdsResponse> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // TXN_TO_WRITE_IDS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list650 = iprot.readListBegin();
+                struct.txnToWriteIds = new ArrayList<TxnToWriteId>(_list650.size);
+                TxnToWriteId _elem651;
+                for (int _i652 = 0; _i652 < _list650.size; ++_i652)
+                {
+                  _elem651 = new TxnToWriteId();
+                  _elem651.read(iprot);
+                  struct.txnToWriteIds.add(_elem651);
+                }
+                iprot.readListEnd();
+              }
+              struct.setTxnToWriteIdsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.txnToWriteIds != null) {
+        oprot.writeFieldBegin(TXN_TO_WRITE_IDS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.txnToWriteIds.size()));
+          for (TxnToWriteId _iter653 : struct.txnToWriteIds)
+          {
+            _iter653.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AllocateTableWriteIdsResponseTupleSchemeFactory implements SchemeFactory {
+    public AllocateTableWriteIdsResponseTupleScheme getScheme() {
+      return new AllocateTableWriteIdsResponseTupleScheme();
+    }
+  }
+
+  private static class AllocateTableWriteIdsResponseTupleScheme extends TupleScheme<AllocateTableWriteIdsResponse> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.txnToWriteIds.size());
+        for (TxnToWriteId _iter654 : struct.txnToWriteIds)
+        {
+          _iter654.write(oprot);
+        }
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AllocateTableWriteIdsResponse struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.txnToWriteIds = new ArrayList<TxnToWriteId>(_list655.size);
+        TxnToWriteId _elem656;
+        for (int _i657 = 0; _i657 < _list655.size; ++_i657)
+        {
+          _elem656 = new TxnToWriteId();
+          _elem656.read(iprot);
+          struct.txnToWriteIds.add(_elem656);
+        }
+      }
+      struct.setTxnToWriteIdsIsSet(true);
+    }
+  }
+
+}
+
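[Editorial note: a corresponding sketch for the response side. TxnToWriteId is assumed to expose the usual generated (txnId, writeId) constructor for its required fields; the ids below are illustrative:

    AllocateTableWriteIdsResponse resp = new AllocateTableWriteIdsResponse();
    resp.addToTxnToWriteIds(new TxnToWriteId(101L, 1L));  // txn 101 mapped to write id 1 (assumed ctor)
    resp.addToTxnToWriteIds(new TxnToWriteId(102L, 2L));
    resp.validate();   // throws TProtocolException if the required txnToWriteIds list were unset
]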

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java
new file mode 100644
index 0000000..e7cc091
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AlreadyExistsException.java
@@ -0,0 +1,395 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class AlreadyExistsException extends TException implements org.apache.thrift.TBase<AlreadyExistsException, AlreadyExistsException._Fields>, java.io.Serializable, Cloneable, Comparable<AlreadyExistsException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("AlreadyExistsException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new AlreadyExistsExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new AlreadyExistsExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(AlreadyExistsException.class, metaDataMap);
+  }
+
+  public AlreadyExistsException() {
+  }
+
+  public AlreadyExistsException(
+    String message)
+  {
+    this();
+    this.message = message;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public AlreadyExistsException(AlreadyExistsException other) {
+    if (other.isSetMessage()) {
+      this.message = other.message;
+    }
+  }
+
+  public AlreadyExistsException deepCopy() {
+    return new AlreadyExistsException(this);
+  }
+
+  @Override
+  public void clear() {
+    this.message = null;
+  }
+
+  public String getMessage() {
+    return this.message;
+  }
+
+  public void setMessage(String message) {
+    this.message = message;
+  }
+
+  public void unsetMessage() {
+    this.message = null;
+  }
+
+  /** Returns true if field message is set (has been assigned a value) and false otherwise */
+  public boolean isSetMessage() {
+    return this.message != null;
+  }
+
+  public void setMessageIsSet(boolean value) {
+    if (!value) {
+      this.message = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case MESSAGE:
+      if (value == null) {
+        unsetMessage();
+      } else {
+        setMessage((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case MESSAGE:
+      return getMessage();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case MESSAGE:
+      return isSetMessage();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof AlreadyExistsException)
+      return this.equals((AlreadyExistsException)that);
+    return false;
+  }
+
+  public boolean equals(AlreadyExistsException that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_message = true && this.isSetMessage();
+    boolean that_present_message = true && that.isSetMessage();
+    if (this_present_message || that_present_message) {
+      if (!(this_present_message && that_present_message))
+        return false;
+      if (!this.message.equals(that.message))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_message = true && (isSetMessage());
+    list.add(present_message);
+    if (present_message)
+      list.add(message);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(AlreadyExistsException other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMessage()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("AlreadyExistsException(");
+    boolean first = true;
+
+    sb.append("message:");
+    if (this.message == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.message);
+    }
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class AlreadyExistsExceptionStandardSchemeFactory implements SchemeFactory {
+    public AlreadyExistsExceptionStandardScheme getScheme() {
+      return new AlreadyExistsExceptionStandardScheme();
+    }
+  }
+
+  private static class AlreadyExistsExceptionStandardScheme extends StandardScheme<AlreadyExistsException> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, AlreadyExistsException struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // MESSAGE
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.message = iprot.readString();
+              struct.setMessageIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, AlreadyExistsException struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.message != null) {
+        oprot.writeFieldBegin(MESSAGE_FIELD_DESC);
+        oprot.writeString(struct.message);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class AlreadyExistsExceptionTupleSchemeFactory implements SchemeFactory {
+    public AlreadyExistsExceptionTupleScheme getScheme() {
+      return new AlreadyExistsExceptionTupleScheme();
+    }
+  }
+
+  private static class AlreadyExistsExceptionTupleScheme extends TupleScheme<AlreadyExistsException> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, AlreadyExistsException struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetMessage()) {
+        optionals.set(0);
+      }
+      oprot.writeBitSet(optionals, 1);
+      if (struct.isSetMessage()) {
+        oprot.writeString(struct.message);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, AlreadyExistsException struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(1);
+      if (incoming.get(0)) {
+        struct.message = iprot.readString();
+        struct.setMessageIsSet(true);
+      }
+    }
+  }
+
+}
+
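As a quick illustration of the generated class above: write() and read() dispatch to the scheme-based serialization shown in the diff, and equals()/hashCode() compare only fields that have been set. The following is a minimal round-trip sketch, not part of the commit; it assumes the standard Thrift-generated constructors (a no-arg constructor plus one taking the message), and the class name, buffer size, and message text are illustrative only:

    import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
    import org.apache.thrift.protocol.TCompactProtocol;
    import org.apache.thrift.transport.TMemoryBuffer;

    public class AlreadyExistsExceptionRoundTrip {
      public static void main(String[] args) throws Exception {
        AlreadyExistsException original =
            new AlreadyExistsException("Table default.t1 already exists");

        // Serialize with the compact protocol; this is the same path the
        // private writeObject hook above takes for Java serialization.
        TMemoryBuffer buffer = new TMemoryBuffer(128);
        original.write(new TCompactProtocol(buffer));

        // Deserialize into a fresh instance and compare.
        AlreadyExistsException copy = new AlreadyExistsException();
        copy.read(new TCompactProtocol(buffer));

        // equals()/hashCode() only consider fields that are set, so this holds.
        assert original.equals(copy) && original.hashCode() == copy.hashCode();
      }
    }

Since TCompactProtocol maps to the StandardScheme in the generated schemes table, this exercises the same read/write path shown in AlreadyExistsExceptionStandardScheme above.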


[50/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/.gitignore
----------------------------------------------------------------------
diff --git a/.gitignore b/.gitignore
index eb7235c..33f209d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,3 +31,4 @@ itests/hive-blobstore/src/test/resources/blobstore-conf.xml
 .DS_Store
 patchprocess
 standalone-metastore/src/gen/version
+standalone-metastore/metastore-common/src/gen/version

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/hcatalog/core/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/core/pom.xml b/hcatalog/core/pom.xml
index 4315527..06e83e1 100644
--- a/hcatalog/core/pom.xml
+++ b/hcatalog/core/pom.xml
@@ -69,7 +69,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/hcatalog/webhcat/java-client/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/java-client/pom.xml b/hcatalog/webhcat/java-client/pom.xml
index 56b6d53..185c785 100644
--- a/hcatalog/webhcat/java-client/pom.xml
+++ b/hcatalog/webhcat/java-client/pom.xml
@@ -76,7 +76,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/hcatalog/webhcat/svr/pom.xml
----------------------------------------------------------------------
diff --git a/hcatalog/webhcat/svr/pom.xml b/hcatalog/webhcat/svr/pom.xml
index 5c093b0..8e4bc70 100644
--- a/hcatalog/webhcat/svr/pom.xml
+++ b/hcatalog/webhcat/svr/pom.xml
@@ -202,7 +202,7 @@
     <!-- test inter-project -->
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/itests/hcatalog-unit/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hcatalog-unit/pom.xml b/itests/hcatalog-unit/pom.xml
index a1d2149..0568cf6 100644
--- a/itests/hcatalog-unit/pom.xml
+++ b/itests/hcatalog-unit/pom.xml
@@ -98,7 +98,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/itests/hive-blobstore/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-blobstore/pom.xml b/itests/hive-blobstore/pom.xml
index 9466af2..b8d5f0d 100644
--- a/itests/hive-blobstore/pom.xml
+++ b/itests/hive-blobstore/pom.xml
@@ -55,13 +55,13 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <classifier>tests</classifier>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/itests/hive-minikdc/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-minikdc/pom.xml b/itests/hive-minikdc/pom.xml
index 4d6e8a8..406c407 100644
--- a/itests/hive-minikdc/pom.xml
+++ b/itests/hive-minikdc/pom.xml
@@ -74,13 +74,13 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
       <classifier>tests</classifier>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/itests/hive-unit-hadoop2/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-unit-hadoop2/pom.xml b/itests/hive-unit-hadoop2/pom.xml
index 04e39b5..3288659 100644
--- a/itests/hive-unit-hadoop2/pom.xml
+++ b/itests/hive-unit-hadoop2/pom.xml
@@ -101,7 +101,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <classifier>tests</classifier>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/itests/hive-unit/pom.xml
----------------------------------------------------------------------
diff --git a/itests/hive-unit/pom.xml b/itests/hive-unit/pom.xml
index 5264617..6081884 100644
--- a/itests/hive-unit/pom.xml
+++ b/itests/hive-unit/pom.xml
@@ -177,7 +177,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <classifier>tests</classifier>
     </dependency>
@@ -568,7 +568,7 @@
                   <fileset dir="${basedir}/${hive.path.to.root}/metastore/scripts/"/>
                 </copy>
                 <copy todir="${test.tmp.dir}/scripts/metastore/upgrade" overwrite="true">
-                  <fileset dir="${basedir}/${hive.path.to.root}/standalone-metastore/src/main/sql/"/>
+                  <fileset dir="${basedir}/${hive.path.to.root}/standalone-metastore/metastore-common/src/main/sql/"/>
                 </copy>
               </target>
             </configuration>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/itests/qtest-accumulo/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-accumulo/pom.xml b/itests/qtest-accumulo/pom.xml
index 1a1100c..e0c31e4 100644
--- a/itests/qtest-accumulo/pom.xml
+++ b/itests/qtest-accumulo/pom.xml
@@ -67,13 +67,13 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <classifier>tests</classifier>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/itests/qtest-spark/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest-spark/pom.xml b/itests/qtest-spark/pom.xml
index 8ed3171..c33aa90 100644
--- a/itests/qtest-spark/pom.xml
+++ b/itests/qtest-spark/pom.xml
@@ -127,13 +127,13 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <classifier>tests</classifier>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/itests/qtest/pom.xml
----------------------------------------------------------------------
diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml
index a1400fd..8020743 100644
--- a/itests/qtest/pom.xml
+++ b/itests/qtest/pom.xml
@@ -58,13 +58,13 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <classifier>tests</classifier>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/itests/util/pom.xml
----------------------------------------------------------------------
diff --git a/itests/util/pom.xml b/itests/util/pom.xml
index 9334f90..e1aece6 100644
--- a/itests/util/pom.xml
+++ b/itests/util/pom.xml
@@ -113,7 +113,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>
     </dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/llap-server/pom.xml
----------------------------------------------------------------------
diff --git a/llap-server/pom.xml b/llap-server/pom.xml
index 978d797..6b52d5d 100644
--- a/llap-server/pom.xml
+++ b/llap-server/pom.xml
@@ -242,7 +242,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/metastore/pom.xml
----------------------------------------------------------------------
diff --git a/metastore/pom.xml b/metastore/pom.xml
index dc1b4e8..a75ab97 100644
--- a/metastore/pom.xml
+++ b/metastore/pom.xml
@@ -46,7 +46,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
     </dependency>
 	<dependency>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/packaging/src/main/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/packaging/src/main/assembly/bin.xml b/packaging/src/main/assembly/bin.xml
index 2dd9260..373ed99 100644
--- a/packaging/src/main/assembly/bin.xml
+++ b/packaging/src/main/assembly/bin.xml
@@ -222,7 +222,7 @@
     </fileSet>
 
     <fileSet>
-      <directory>${project.parent.basedir}/standalone-metastore/src/main/sql</directory>
+      <directory>${project.parent.basedir}/standalone-metastore/metastore-common/src/main/sql</directory>
       <includes>
         <include>**/*</include>
       </includes>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/packaging/src/main/assembly/src.xml
----------------------------------------------------------------------
diff --git a/packaging/src/main/assembly/src.xml b/packaging/src/main/assembly/src.xml
index c9aed3b..d9db67d 100644
--- a/packaging/src/main/assembly/src.xml
+++ b/packaging/src/main/assembly/src.xml
@@ -96,7 +96,7 @@
         <include>shims/**/*</include>
         <include>spark-client/**/*</include>
         <include>storage-api/**/*</include>
-        <include>standalone-metastore/**/*</include>
+        <include>standalone-metastore/metastore-common/**/*</include>
         <include>streaming/**/*</include>
         <include>testutils/**/*</include>
         <include>upgrade-acid/**/*</include>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/ql/pom.xml
----------------------------------------------------------------------
diff --git a/ql/pom.xml b/ql/pom.xml
index 0c181e5..fe11d06 100644
--- a/ql/pom.xml
+++ b/ql/pom.xml
@@ -458,7 +458,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>
       <scope>test</scope>
@@ -915,7 +915,7 @@
                   <include>org.apache.hive:hive-llap-common</include>
                   <include>org.apache.hive:hive-llap-client</include>
                   <include>org.apache.hive:hive-metastore</include>
-                  <include>org.apache.hive:hive-standalone-metastore</include>
+                  <include>org.apache.hive:hive-standalone-metastore-common</include>
                   <include>org.apache.hive:hive-service-rpc</include>
                   <include>com.esotericsoftware:kryo-shaded</include>
                   <include>com.esotericsoftware:minlog</include>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/service/pom.xml
----------------------------------------------------------------------
diff --git a/service/pom.xml b/service/pom.xml
index ebe7863..7023472 100644
--- a/service/pom.xml
+++ b/service/pom.xml
@@ -230,7 +230,7 @@
       </dependency>
     <dependency>
       <groupId>org.apache.hive</groupId>
-      <artifactId>hive-standalone-metastore</artifactId>
+      <artifactId>hive-standalone-metastore-common</artifactId>
       <version>${project.version}</version>
       <type>test-jar</type>
       <scope>test</scope>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/DEV-README
----------------------------------------------------------------------
diff --git a/standalone-metastore/DEV-README b/standalone-metastore/DEV-README
index 7b65e92..9c26117 100644
--- a/standalone-metastore/DEV-README
+++ b/standalone-metastore/DEV-README
@@ -49,7 +49,7 @@ You can download the Oracle driver at
 http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
 You should download Oracle 11g Release 1, ojdbc6.jar
 
-Logs for tests are located under standalone-metastore/target/failsafe-reports
+Logs for tests are located under standalone-metastore/metastore-common/target/failsafe-reports
 
 If you wish to use one of these containers to run your own tests against a
 non-Derby version of the metastore, you can do that as well.  You must specify

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/findbugs/findbugs-exclude.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/findbugs/findbugs-exclude.xml b/standalone-metastore/metastore-common/findbugs/findbugs-exclude.xml
new file mode 100644
index 0000000..e2c76d0
--- /dev/null
+++ b/standalone-metastore/metastore-common/findbugs/findbugs-exclude.xml
@@ -0,0 +1,24 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<FindBugsFilter>
+    <Match>
+        <Class name="~org.apache.hadoop.hive.metastore.parser.*" />
+    </Match>
+    <Match>
+        <Class name="~org.apache.hadoop.hive.metastore.api.*" />
+    </Match>
+</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/pom.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/pom.xml b/standalone-metastore/metastore-common/pom.xml
new file mode 100644
index 0000000..d6df15f
--- /dev/null
+++ b/standalone-metastore/metastore-common/pom.xml
@@ -0,0 +1,754 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <artifactId>hive-standalone-metastore</artifactId>
+    <groupId>org.apache.hive</groupId>
+    <version>4.0.0-SNAPSHOT</version>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+
+  <artifactId>hive-standalone-metastore-common</artifactId>
+  <name>Hive Standalone Metastore Common Code</name>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.orc</groupId>
+      <artifactId>orc-core</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.hadoop</groupId>
+          <artifactId>hadoop-common</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>org.apache.hive</groupId>
+          <artifactId>hive-storage-api</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>com.fasterxml.jackson.core</groupId>
+      <artifactId>jackson-databind</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.github.joshelser</groupId>
+      <artifactId>dropwizard-metrics-hadoop-metrics2-reporter</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.jolbox</groupId>
+      <artifactId>bonecp</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.zaxxer</groupId>
+      <artifactId>HikariCP</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-dbcp</groupId>
+      <artifactId>commons-dbcp</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-jvm</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-json</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>javolution</groupId>
+      <artifactId>javolution</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.antlr</groupId>
+      <artifactId>antlr-runtime</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.commons</groupId>
+      <artifactId>commons-lang3</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.derby</groupId>
+      <artifactId>derby</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-distcp</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
+      <optional>true</optional>
+      <exclusions>
+        <exclusion>
+          <groupId>org.slf4j</groupId>
+          <artifactId>slf4j-log4j12</artifactId>
+        </exclusion>
+        <exclusion>
+          <groupId>commons-logging</groupId>
+          <artifactId>commons-logging</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <!-- This is our one and only Hive dependency. -->
+    <dependency>
+      <groupId>org.apache.hive</groupId>
+      <artifactId>hive-storage-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-slf4j-impl</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.logging.log4j</groupId>
+      <artifactId>log4j-1.2-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.thrift</groupId>
+      <artifactId>libfb303</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.thrift</groupId>
+      <artifactId>libthrift</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.datanucleus</groupId>
+      <artifactId>datanucleus-api-jdo</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.datanucleus</groupId>
+      <artifactId>datanucleus-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.datanucleus</groupId>
+      <artifactId>datanucleus-rdbms</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.datanucleus</groupId>
+      <artifactId>javax.jdo</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.skyscreamer</groupId>
+      <artifactId>jsonassert</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>sqlline</groupId>
+      <artifactId>sqlline</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>commons-logging</groupId>
+      <artifactId>commons-logging</artifactId>
+    </dependency>
+    <!-- test scope dependencies -->
+
+    <dependency>
+      <groupId>com.microsoft.sqlserver</groupId>
+      <artifactId>mssql-jdbc</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <!-- Note: this is LGPL-licensed.  We only use it in a test and do not modify it,
+      so this should be acceptable. -->
+      <groupId>org.mariadb.jdbc</groupId>
+      <artifactId>mariadb-java-client</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.postgresql</groupId>
+      <artifactId>postgresql</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+
+  <profiles>
+    <profile>
+      <id>thriftif</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.apache.maven.plugins</groupId>
+            <artifactId>maven-antrun-plugin</artifactId>
+            <executions>
+              <execution>
+                <id>generate-thrift-sources</id>
+                <phase>generate-sources</phase>
+                <configuration>
+                  <target>
+                    <taskdef name="for" classname="net.sf.antcontrib.logic.ForTask"
+                             classpathref="maven.plugin.classpath" />
+                    <property name="thrift.args" value="${thrift.args}"/>
+                    <property name="thrift.gen.dir" value="${thrift.gen.dir}"/>
+                    <delete dir="${thrift.gen.dir}"/>
+                    <mkdir dir="${thrift.gen.dir}"/>
+                    <for param="thrift.file">
+                      <path>
+                        <fileset dir="." includes="src/main/thrift/*.thrift" />
+                      </path>
+                      <sequential>
+                        <echo message="Generating Thrift code for @{thrift.file}"/>
+                        <exec executable="${thrift.home}/bin/thrift"  failonerror="true" dir=".">
+                          <arg line="${thrift.args} -I ${basedir}/include -I ${basedir}/.. -o ${thrift.gen.dir} @{thrift.file} " />
+                        </exec>
+                      </sequential>
+                    </for>
+                  </target>
+                </configuration>
+                <goals>
+                  <goal>run</goal>
+                </goals>
+              </execution>
+            </executions>
+          </plugin>
+          <plugin>
+            <groupId>com.google.code.maven-replacer-plugin</groupId>
+            <artifactId>replacer</artifactId>
+            <version>1.5.3</version>
+            <executions>
+              <execution>
+                <id>process-thrift-sources-string-intern</id>
+                <phase>process-sources</phase>
+                <goals>
+                  <goal>replace</goal>
+                </goals>
+                <configuration>
+                  <basedir>${basedir}/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/</basedir>
+                  <includes>
+                    <include>FieldSchema.java</include>
+                    <include>Partition.java</include>
+                    <include>SerDeInfo.java</include>
+                    <include>StorageDescriptor.java</include>
+                    <include>ColumnStatisticsDesc.java</include>
+                    <include>ColumnStatisticsObj.java</include>
+                  </includes>
+                  <tokenValueMap>${basedir}/src/main/resources/thrift-replacements.txt</tokenValueMap>
+                  <regex>true</regex>
+                  <quiet>false</quiet>
+                </configuration>
+              </execution>
+              <execution>
+                <id>process-thrift-sources-interface-annotations</id>
+                <phase>process-sources</phase>
+                <goals>
+                  <goal>replace</goal>
+                </goals>
+                <configuration>
+                  <basedir>${basedir}/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/</basedir>
+                  <filesToInclude>*.java</filesToInclude>
+                  <replacements>
+                    <replacement>
+                      <token>public class</token>
+                      <value>@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class</value>
+                      <unescape>true</unescape>
+                    </replacement>
+                    <replacement>
+                      <token>public static class</token>
+                      <value>@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public static class</value>
+                      <unescape>true</unescape>
+                    </replacement>
+                    <replacement>
+                      <token>public interface</token>
+                      <value>@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public interface</value>
+                      <unescape>true</unescape>
+                    </replacement>
+                  </replacements>
+                </configuration>
+              </execution>
+            </executions>
+          </plugin>
+        </plugins>
+      </build>
+    </profile>
+    <profile>
+      <id>findbugs</id>
+      <build>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>findbugs-maven-plugin</artifactId>
+            <version>3.0.0</version>
+            <configuration>
+              <fork>true</fork>
+              <maxHeap>2048</maxHeap>
+              <jvmArgs>-Djava.awt.headless=true -Xmx2048m -Xms512m</jvmArgs>
+              <excludeFilterFile>${basedir}/findbugs/findbugs-exclude.xml</excludeFilterFile>
+            </configuration>
+          </plugin>
+        </plugins>
+      </build>
+      <reporting>
+        <plugins>
+          <plugin>
+            <groupId>org.codehaus.mojo</groupId>
+            <artifactId>findbugs-maven-plugin</artifactId>
+            <version>3.0.0</version>
+            <configuration>
+              <fork>true</fork>
+              <maxHeap>2048</maxHeap>
+              <jvmArgs>-Djava.awt.headless=true -Xmx2048m -Xms512m</jvmArgs>
+              <excludeFilterFile>${basedir}/findbugs/findbugs-exclude.xml</excludeFilterFile>
+            </configuration>
+          </plugin>
+        </plugins>
+      </reporting>
+    </profile>
+    <!--
+  <profile>
+    <id>checkin</id>
+    <build>
+      <plugins>
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-surefire-plugin</artifactId>
+          <version>${maven.surefire.version}</version>
+          <configuration>
+            <includes>
+              <include>**/Test*</include>
+            </includes>
+            <redirectTestOutputToFile>true</redirectTestOutputToFile>
+            <reuseForks>false</reuseForks>
+            <forkCount>${test.forkcount}</forkCount>
+            <argLine>-Xmx2048m</argLine>
+            <failIfNoTests>false</failIfNoTests>
+            <systemPropertyVariables>
+              <build.dir>${project.build.directory}</build.dir>
+              <datanucleus.schema.autoCreateAll>true</datanucleus.schema.autoCreateAll>
+              <derby.version>${derby.version}</derby.version>
+              <derby.stream.error.file>${test.tmp.dir}/derby.log</derby.stream.error.file>
+              <log4j.debug>true</log4j.debug>
+              <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir>
+              <javax.jdo.option.ConnectionURL>jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db;create=true</javax.jdo.option.ConnectionURL>
+              <metastore.schema.verification>false</metastore.schema.verification>
+              <test.tmp.dir>${test.tmp.dir}</test.tmp.dir>
+              <metastore.warehouse.dir>${test.warehouse.scheme}${test.warehouse.dir}</metastore.warehouse.dir>
+            </systemPropertyVariables>
+            <additionalClasspathElements>
+              <additionalClasspathElement>${log4j.conf.dir}</additionalClasspathElement>
+            </additionalClasspathElements>
+          </configuration>
+
+        </plugin>
+
+
+      </plugins>
+    </build>
+  </profile>
+  -->
+  </profiles>
+
+  <build>
+    <resources>
+      <resource>
+        <directory>${basedir}/src/main/resources</directory>
+        <includes>
+          <include>package.jdo</include>
+        </includes>
+      </resource>
+    </resources>
+
+    <pluginManagement>
+      <plugins>
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-antrun-plugin</artifactId>
+          <version>${maven.antrun.plugin.version}</version>
+          <dependencies>
+            <dependency>
+              <groupId>ant-contrib</groupId>
+              <artifactId>ant-contrib</artifactId>
+              <version>${ant.contrib.version}</version>
+              <exclusions>
+                <exclusion>
+                  <groupId>ant</groupId>
+                  <artifactId>ant</artifactId>
+                </exclusion>
+              </exclusions>
+            </dependency>
+          </dependencies>
+        </plugin>
+        <plugin>
+          <groupId>org.apache.maven.plugins</groupId>
+          <artifactId>maven-checkstyle-plugin</artifactId>
+          <version>${maven.checkstyle.plugin.version}</version>
+        </plugin>
+        <plugin>
+          <groupId>org.codehaus.mojo</groupId>
+          <artifactId>exec-maven-plugin</artifactId>
+          <version>${maven.exec.plugin.version}</version>
+        </plugin>
+      </plugins>
+    </pluginManagement>
+    <plugins>
+      <!-- plugins are always listed in sorted order by groupId, artifactId -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-antrun-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>setup-test-dirs</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <delete dir="${test.tmp.dir}" />
+                <delete dir="${test.warehouse.dir}" />
+                <mkdir dir="${test.tmp.dir}" />
+                <mkdir dir="${test.warehouse.dir}" />
+              </target>
+            </configuration>
+          </execution>
+          <execution>
+            <id>generate-version-annotation</id>
+            <phase>generate-sources</phase>
+            <configuration>
+              <target>
+                <exec executable="bash" failonerror="true">
+                  <arg value="${basedir}/src/main/resources/saveVersion.sh"/>
+                  <arg value="${project.version}"/>
+                  <arg value="${hive.version.shortname}"/>
+                  <arg value="${basedir}/src"/>
+                </exec>
+              </target>
+            </configuration>
+            <goals>
+              <goal>run</goal>
+            </goals>
+          </execution>
+          <execution>
+            <id>setup-metastore-scripts</id>
+            <phase>process-test-resources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <target>
+                <mkdir dir="${test.tmp.dir}/scripts/metastore/upgrade" />
+                <copy todir="${test.tmp.dir}/scripts/metastore/upgrade">
+                  <fileset dir="${basedir}/src/main/sql/"/>
+                </copy>
+              </target>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-checkstyle-plugin</artifactId>
+        <configuration>
+          <configLocation>${checkstyle.conf.dir}/checkstyle.xml</configLocation>
+          <propertyExpansion>config_loc=${checkstyle.conf.dir}</propertyExpansion>
+          <includeTestSourceDirectory>true</includeTestSourceDirectory>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>com.github.os72</groupId>
+        <artifactId>protoc-jar-maven-plugin</artifactId>
+        <version>3.5.1.1</version>
+        <executions>
+          <execution>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>run</goal>
+            </goals>
+            <configuration>
+              <protocArtifact>com.google.protobuf:protoc:2.5.0</protocArtifact>
+              <addSources>none</addSources>
+              <inputDirectories>
+                <include>${basedir}/src/main/protobuf/org/apache/hadoop/hive/metastore</include>
+              </inputDirectories>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <!-- TODO MS-SPLIT javadoc plugin -->
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-assembly-plugin</artifactId>
+        <version>${maven.assembly.plugin.version}</version>
+        <executions>
+          <execution>
+            <id>assemble</id>
+            <phase>package</phase>
+            <goals>
+              <goal>single</goal>
+            </goals>
+            <configuration>
+              <finalName>apache-hive-metastore-${project.version}</finalName>
+              <descriptors>
+                <descriptor>src/assembly/bin.xml</descriptor>
+                <descriptor>src/assembly/src.xml</descriptor>
+              </descriptors>
+              <tarLongFileMode>gnu</tarLongFileMode>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-enforcer-plugin</artifactId>
+        <executions>
+          <execution>
+            <id>enforce-banned-dependencies</id>
+            <goals>
+              <goal>enforce</goal>
+            </goals>
+            <configuration>
+              <rules>
+                <bannedDependencies>
+                  <excludes>
+                    <!--LGPL licenced library-->
+                    <exclude>com.google.code.findbugs:annotations</exclude>
+                  </excludes>
+                </bannedDependencies>
+              </rules>
+              <fail>true</fail>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-failsafe-plugin</artifactId>
+        <version>2.20.1</version>
+        <executions>
+          <execution>
+            <goals>
+              <goal>integration-test</goal>
+              <goal>verify</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <redirectTestOutputToFile>true</redirectTestOutputToFile>
+          <reuseForks>false</reuseForks>
+          <argLine>-Xmx2048m</argLine>
+          <failIfNoTests>false</failIfNoTests>
+          <systemPropertyVariables>
+            <log4j.debug>true</log4j.debug>
+            <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir>
+            <test.tmp.dir>${test.tmp.dir}</test.tmp.dir>
+            <hive.in.test>true</hive.in.test>
+          </systemPropertyVariables>
+          <additionalClasspathElements>
+            <additionalClasspathElement>${log4j.conf.dir}</additionalClasspathElement>
+            <additionalClasspathElement>${itest.jdbc.jars}</additionalClasspathElement>
+          </additionalClasspathElements>
+          <skipITs>${skipITests}</skipITs> <!-- set this to false to run these tests -->
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <version>${maven.surefire.version}</version>
+        <configuration>
+          <redirectTestOutputToFile>true</redirectTestOutputToFile>
+          <reuseForks>false</reuseForks>
+          <forkCount>${test.forkcount}</forkCount>
+          <argLine>-Xmx2048m</argLine>
+          <failIfNoTests>false</failIfNoTests>
+          <systemPropertyVariables>
+            <build.dir>${project.build.directory}</build.dir>
+            <datanucleus.schema.autoCreateAll>true</datanucleus.schema.autoCreateAll>
+            <derby.version>${derby.version}</derby.version>
+            <derby.stream.error.file>${test.tmp.dir}/derby.log</derby.stream.error.file>
+            <log4j.debug>true</log4j.debug>
+            <java.io.tmpdir>${test.tmp.dir}</java.io.tmpdir>
+            <javax.jdo.option.ConnectionURL>jdbc:derby:memory:${test.tmp.dir}/junit_metastore_db;create=true</javax.jdo.option.ConnectionURL>
+            <metastore.schema.verification>false</metastore.schema.verification>
+            <test.tmp.dir>${test.tmp.dir}</test.tmp.dir>
+            <metastore.warehouse.dir>${test.warehouse.scheme}${test.warehouse.dir}</metastore.warehouse.dir>
+          </systemPropertyVariables>
+          <additionalClasspathElements>
+            <additionalClasspathElement>${log4j.conf.dir}</additionalClasspathElement>
+          </additionalClasspathElements>
+          <groups>${test.groups}</groups>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <version>0.10</version>
+        <configuration>
+          <excludes>
+            <exclude>binary-package-licenses/**</exclude>
+            <exclude>DEV-README</exclude>
+            <exclude>**/src/main/sql/**</exclude>
+            <exclude>**/README.md</exclude>
+            <exclude>**/*.iml</exclude>
+            <exclude>**/*.txt</exclude>
+            <exclude>**/*.log</exclude>
+            <exclude>**/*.arcconfig</exclude>
+            <exclude>**/package-info.java</exclude>
+            <exclude>**/*.properties</exclude>
+            <exclude>**/*.q</exclude>
+            <exclude>**/*.q.out</exclude>
+            <exclude>**/*.xml</exclude>
+            <exclude>**/gen/**</exclude>
+            <exclude>**/patchprocess/**</exclude>
+            <exclude>**/metastore_db/**</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-jar-plugin</artifactId>
+        <executions>
+          <execution>
+            <goals>
+              <goal>test-jar</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>build-helper-maven-plugin</artifactId>
+        <version>3.0.0</version>
+        <executions>
+          <execution>
+            <id>add-source</id>
+            <phase>generate-sources</phase>
+            <goals>
+              <goal>add-source</goal>
+            </goals>
+            <configuration>
+              <sources>
+                <source>src/gen/thrift/gen-javabean</source>
+                <source>${project.build.directory}/generated-sources</source>
+                <source>src/gen/version</source>
+              </sources>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.codehaus.mojo</groupId>
+        <artifactId>exec-maven-plugin</artifactId>
+        <executions>
+          <execution>
+            <phase>prepare-package</phase>
+            <goals>
+              <goal>exec</goal>
+            </goals>
+            <configuration>
+              <executable>java</executable>
+              <arguments>
+                <argument>-classpath</argument>
+                <classpath/>
+                <argument>org.apache.hadoop.hive.metastore.conf.ConfTemplatePrinter</argument>
+                <argument>${project.build.directory}/generated-sources/conf/metastore-site.xml.template</argument>
+              </arguments>
+            </configuration>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.datanucleus</groupId>
+        <artifactId>datanucleus-maven-plugin</artifactId>
+        <version>4.0.5</version>
+        <configuration>
+          <api>JDO</api>
+          <verbose>false</verbose>
+          <log4jConfiguration>${basedir}/src/main/resources/datanucleus-log4j.properties</log4jConfiguration>
+          <metadataIncludes>**/*.jdo</metadataIncludes>
+          <fork>false</fork>
+        </configuration>
+        <executions>
+          <execution>
+            <phase>process-classes</phase>
+            <goals>
+              <goal>enhance</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+      <plugin>
+        <groupId>org.antlr</groupId>
+        <artifactId>antlr3-maven-plugin</artifactId>
+        <version>${antlr.version}</version>
+        <executions>
+          <execution>
+            <goals>
+              <goal>antlr</goal>
+            </goals>
+          </execution>
+        </executions>
+        <configuration>
+          <outputDirectory>${project.build.directory}/generated-sources</outputDirectory>
+          <sourceDirectory>${basedir}/src/main/java</sourceDirectory>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>
\ No newline at end of file
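To make the replacer executions in this pom concrete: the process-thrift-sources-interface-annotations step rewrites the generated Java sources in place, prefixing every class and interface declaration with the Hadoop audience/stability annotations. A hedged before/after sketch follows; ExampleGenerated is a hypothetical stand-in for the real files under src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/, and hadoop-common (a dependency declared above) provides the annotation types:

    // Before: the declaration as the Thrift compiler emits it.
    //   public class ExampleGenerated implements java.io.Serializable { }
    //
    // After: the "public class" token has been replaced; the plugin emits the
    // annotations on the same line as the declaration.
    @org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class ExampleGenerated implements java.io.Serializable {
    }

The companion process-thrift-sources-string-intern execution is driven by the tokenValueMap in thrift-replacements.txt, whose contents are not part of this diff, so no example is sketched for it here.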

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/assembly/bin.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/assembly/bin.xml b/standalone-metastore/metastore-common/src/assembly/bin.xml
new file mode 100644
index 0000000..81912d7
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/assembly/bin.xml
@@ -0,0 +1,136 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<assembly
+  xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+
+  <id>bin</id>
+
+  <formats>
+    <format>dir</format>
+    <format>tar.gz</format>
+  </formats>
+
+  <baseDirectory>apache-hive-metastore-${project.version}-bin</baseDirectory>
+
+  <dependencySets>
+    <dependencySet>
+      <outputDirectory>lib</outputDirectory>
+      <unpack>false</unpack>
+      <useProjectArtifact>true</useProjectArtifact>
+      <useStrictFiltering>true</useStrictFiltering>
+      <useTransitiveFiltering>true</useTransitiveFiltering>
+      <excludes>
+        <exclude>org.apache.hadoop:*</exclude>
+        <exclude>org.slf4j:*</exclude>
+        <exclude>log4j:*</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.basedir}</directory>
+      <excludes>
+        <exclude>target/**</exclude>
+        <exclude>.classpath</exclude>
+        <exclude>.project</exclude>
+        <exclude>.settings/**</exclude>
+        <exclude>lib/**</exclude>
+      </excludes>
+
+      <includes>
+        <include>README.txt</include>
+        <include>LICENSE</include>
+        <include>NOTICE</include>
+      </includes>
+      <outputDirectory>/</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}/binary-package-licenses</directory>
+      <includes>
+        <include>/*</include>
+      </includes>
+      <excludes>
+        <exclude>/README</exclude>
+      </excludes>
+      <outputDirectory>binary-package-licenses</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <fileMode>755</fileMode>
+      <directory>${project.basedir}/src/main/scripts</directory>
+      <includes>
+        <include>base</include>
+        <include>schematool</include>
+        <include>start-metastore</include>
+        <include>metastore-config.sh</include>
+        <include>ext/**/*</include>
+      </includes>
+      <outputDirectory>bin</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}/src/main/sql</directory>
+      <includes>
+        <include>**/*</include>
+      </includes>
+      <outputDirectory>scripts/metastore/upgrade</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}/src/gen/thrift/gen-php</directory>
+      <includes>
+        <include>**/*</include>
+      </includes>
+      <outputDirectory>lib/php/packages/hive_metastore</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}/src/gen/thrift/gen-py/hive_metastore</directory>
+      <fileMode>755</fileMode>
+       <includes>
+        <include>**/*</include>
+      </includes>
+      <outputDirectory>lib/py/hive_metastore</outputDirectory>
+    </fileSet>
+
+    <fileSet>
+      <directory>${project.basedir}/src/main/resources/</directory>
+      <fileMode>644</fileMode>
+      <includes>
+        <include>metastore-site.xml</include>
+        <include>metastore-log4j2.properties</include>
+      </includes>
+      <outputDirectory>conf</outputDirectory>
+    </fileSet>
+  </fileSets>
+
+  <files>
+    <file>
+      <source>${project.build.directory}/generated-sources/conf/metastore-site.xml.template</source>
+      <outputDirectory>conf</outputDirectory>
+    </file>
+  </files>
+
+</assembly>
+

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/assembly/src.xml
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/assembly/src.xml b/standalone-metastore/metastore-common/src/assembly/src.xml
new file mode 100644
index 0000000..a240544
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/assembly/src.xml
@@ -0,0 +1,53 @@
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing,
+ software distributed under the License is distributed on an
+ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied.  See the License for the
+ specific language governing permissions and limitations
+ under the License.
+-->
+
+<assembly
+  xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2 http://maven.apache.org/xsd/assembly-1.1.2.xsd">
+
+  <id>src</id>
+
+  <formats>
+    <format>tar.gz</format>
+  </formats>
+
+  <baseDirectory>apache-hive-metastore-${project.version}-src</baseDirectory>
+
+  <fileSets>
+    <fileSet>
+      <directory>${project.basedir}</directory>
+
+      <excludes>
+        <exclude>target/**</exclude>
+      </excludes>
+
+      <includes>
+        <include>.checkstyle</include>
+        <include>.gitattributes</include>
+        <include>.gitignore</include>
+        <include>LICENSE</include>
+        <include>NOTICE</include>
+        <include>pom.xml</include>
+        <include>src/**/*</include>
+      </includes>
+      <outputDirectory>/</outputDirectory>
+    </fileSet>
+  </fileSets>
+</assembly>


[64/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
index 0000000,d972d10..df35f22
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnStore.java
@@@ -1,0 -1,490 +1,496 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import com.google.common.annotations.VisibleForTesting;
++
+ import org.apache.hadoop.classification.InterfaceAudience;
+ import org.apache.hadoop.classification.InterfaceStability;
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.common.classification.RetrySemantics;
+ import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.events.AcidWriteEvent;
+ 
+ import java.sql.SQLException;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Set;
+ 
+ /**
+  * A handler to answer transaction related calls that come into the metastore
+  * server.
+  */
+ @InterfaceAudience.Private
+ @InterfaceStability.Evolving
+ public interface TxnStore extends Configurable {
+ 
+   enum MUTEX_KEY {
+     Initiator, Cleaner, HouseKeeper, CompactionHistory, CheckLock,
+     WriteSetCleaner, CompactionScheduler, WriteIdAllocator
+   }
+   // Compactor states (Should really be enum)
+   String INITIATED_RESPONSE = "initiated";
+   String WORKING_RESPONSE = "working";
+   String CLEANING_RESPONSE = "ready for cleaning";
+   String FAILED_RESPONSE = "failed";
+   String SUCCEEDED_RESPONSE = "succeeded";
+   String ATTEMPTED_RESPONSE = "attempted";
+ 
+   int TIMED_OUT_TXN_ABORT_BATCH_SIZE = 50000;
+ 
+   /**
+    * Get information about open transactions.  This gives extensive information about the
+    * transactions rather than just the list of transactions.  This should be used when the need
+    * is to see information about the transactions (e.g. show transactions).
+    * @return information about open transactions
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   GetOpenTxnsInfoResponse getOpenTxnsInfo() throws MetaException;
+ 
+   /**
+    * Get list of valid transactions.  This gives just the list of transactions that are open.
+    * @return list of open transactions, as well as a high water mark.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   GetOpenTxnsResponse getOpenTxns() throws MetaException;
+ 
+   /**
+    * Get the count for open transactions.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   void countOpenTxns() throws MetaException;
+ 
+   /**
+    * Open a set of transactions
+    * @param rqst request to open transactions
+    * @return information on opened transactions
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   OpenTxnsResponse openTxns(OpenTxnRequest rqst) throws MetaException;
+ 
+   @RetrySemantics.Idempotent
+   long getTargetTxnId(String replPolicy, long sourceTxnId) throws MetaException;
+ 
+   /**
+    * Abort (rollback) a transaction.
+    * @param rqst info on transaction to abort
+    * @throws NoSuchTxnException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void abortTxn(AbortTxnRequest rqst) throws NoSuchTxnException, MetaException, TxnAbortedException;
+ 
+   /**
+    * Abort (rollback) a list of transactions in one request.
+    * @param rqst info on transactions to abort
+    * @throws NoSuchTxnException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void abortTxns(AbortTxnsRequest rqst) throws NoSuchTxnException, MetaException;
+ 
+   /**
+    * Commit a transaction
+    * @param rqst info on transaction to commit
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void commitTxn(CommitTxnRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException,  MetaException;
+ 
+   /**
+    * Replicate Table Write Ids state to mark aborted write ids and writeid high water mark.
+    * @param rqst info on table/partitions and writeid snapshot to replicate.
+    * @throws MetaException in case of failure
+    */
+   @RetrySemantics.Idempotent
+   void replTableWriteIdState(ReplTblWriteIdStateRequest rqst) throws MetaException;
+ 
+   /**
+    * Get the first transaction corresponding to the given database and table after the
+    * transactions referenced in the transaction snapshot.
+    * @return basic information on the first such completed transaction
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   BasicTxnInfo getFirstCompletedTransactionForTableAfterCommit(
+       String inputDbName, String inputTableName, ValidWriteIdList txnList)
+           throws MetaException;
++
++  @RetrySemantics.ReadOnly
++  long getTxnIdForWriteId(String dbName, String tblName, long writeId)
++      throws MetaException;
++
+   /**
+    * Gets the list of valid write ids for the given table with respect to the current txn
+    * @param rqst info on transaction and list of table names associated with given transaction
+    * @throws NoSuchTxnException
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   GetValidWriteIdsResponse getValidWriteIds(GetValidWriteIdsRequest rqst)
+           throws NoSuchTxnException,  MetaException;
+ 
+   /**
+    * Allocate a write ID for the given table and associate it with a transaction
+    * @param rqst info on transaction and table to allocate write id
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   AllocateTableWriteIdsResponse allocateTableWriteIds(AllocateTableWriteIdsRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Called on conversion of an existing table to full acid.  Sets the initial write ID to a
+    * high enough value so that we can assign unique ROW__IDs to data in existing files.
+    */
+   void seedWriteIdOnAcidConversion(InitializeTableWriteIdsRequest rqst) throws MetaException;
+ 
+   /**
+    * Obtain a lock.
+    * @param rqst information on the lock to obtain.  If the requester is part of a transaction
+    *             the txn information must be included in the lock request.
+    * @return info on the lock, including whether it was obtained.
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.CannotRetry
+   LockResponse lock(LockRequest rqst)
+     throws NoSuchTxnException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Check whether a lock has been obtained.  This is used after {@link #lock} returned a wait
+    * state.
+    * @param rqst info on the lock to check
+    * @return info on the state of the lock
+    * @throws NoSuchTxnException
+    * @throws NoSuchLockException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   LockResponse checkLock(CheckLockRequest rqst)
+     throws NoSuchTxnException, NoSuchLockException, TxnAbortedException, MetaException;
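+ 
+   // Illustration (not part of the patch): a caller typically polls, i.e. lock()
+   // returns a WAITING state, then checkLock() is invoked with the same lock id
+   // until the response reports ACQUIRED.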
+ 
+   /**
+    * Unlock a lock.  It is not legal to call this if the caller is part of a txn.  In that case
+    * the txn should be committed or aborted instead.  (Note someday this will change since
+    * multi-statement transactions will allow unlocking in the transaction.)
+    * @param rqst lock to unlock
+    * @throws NoSuchLockException
+    * @throws TxnOpenException
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void unlock(UnlockRequest rqst)
+     throws NoSuchLockException, TxnOpenException, MetaException;
+ 
+   /**
+    * Get information on current locks.
+    * @param rqst lock information to retrieve
+    * @return lock information.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   ShowLocksResponse showLocks(ShowLocksRequest rqst) throws MetaException;
+ 
+   /**
+    * Send a heartbeat for a lock or a transaction
+    * @param ids lock and/or txn id to heartbeat
+    * @throws NoSuchTxnException
+    * @throws NoSuchLockException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   void heartbeat(HeartbeatRequest ids)
+     throws NoSuchTxnException,  NoSuchLockException, TxnAbortedException, MetaException;
+ 
+   /**
+    * Heartbeat a group of transactions together
+    * @param rqst set of transactions to heartbeat
+    * @return info on txns that were heartbeated
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   HeartbeatTxnRangeResponse heartbeatTxnRange(HeartbeatTxnRangeRequest rqst)
+     throws MetaException;
+ 
+   /**
+    * Submit a compaction request into the queue.  This is called when a user manually requests a
+    * compaction.
+    * @param rqst information on what to compact
+    * @return id of the compaction that has been started or existing id if this resource is already scheduled
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   CompactionResponse compact(CompactionRequest rqst) throws MetaException;
+ 
+   /**
+    * Show list of current compactions.
+    * @param rqst info on which compactions to show
+    * @return compaction information
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   ShowCompactResponse showCompact(ShowCompactRequest rqst) throws MetaException;
+ 
+   /**
+    * Add information on a set of dynamic partitions that participated in a transaction.
+    * @param rqst dynamic partition info.
+    * @throws NoSuchTxnException
+    * @throws TxnAbortedException
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   void addDynamicPartitions(AddDynamicPartitions rqst)
+       throws NoSuchTxnException,  TxnAbortedException, MetaException;
+ 
+   /**
+    * Clean up corresponding records in metastore tables.
+    * @param type Hive object type
+    * @param db database object
+    * @param table table object
+    * @param partitionIterator partition iterator
+    * @throws MetaException
+    */
+   @RetrySemantics.Idempotent
+   void cleanupRecords(HiveObjectType type, Database db, Table table,
+                              Iterator<Partition> partitionIterator) throws MetaException;
+ 
+   @RetrySemantics.Idempotent
+   void onRename(String oldCatName, String oldDbName, String oldTabName, String oldPartName,
+       String newCatName, String newDbName, String newTabName, String newPartName)
+       throws MetaException;
+ 
+   /**
+    * Timeout transactions and/or locks.  This should only be called by the compactor.
+    */
+   @RetrySemantics.Idempotent
+   void performTimeOuts();
+ 
+   /**
+    * This will look through the completed_txn_components table and look for partitions or tables
+    * that may be ready for compaction.  Also, look through txns and txn_components tables for
+    * aborted transactions that we should add to the list.
+    * @param maxAborted Maximum number of aborted queries to allow before marking this as a
+    *                   potential compaction.
+    * @return list of CompactionInfo structs.  These will not have id, type,
+    * or runAs set since these are only potential compactions not actual ones.
+    */
+   @RetrySemantics.ReadOnly
+   Set<CompactionInfo> findPotentialCompactions(int maxAborted) throws MetaException;
+ 
+   /**
+    * Sets the user to run as.  This is for the case
+    * where the request was generated by the user and so the worker must set this value later.
+    * @param cq_id id of this entry in the queue
+    * @param user user to run the jobs as
+    */
+   @RetrySemantics.Idempotent
+   void setRunAs(long cq_id, String user) throws MetaException;
+ 
+   /**
+    * This will grab the next compaction request off of
+    * the queue, and assign it to the worker.
+    * @param workerId id of the worker calling this, will be recorded in the db
+    * @return an info element for this compaction request, or null if there is no work to do now.
+    */
+   @RetrySemantics.ReadOnly
+   CompactionInfo findNextToCompact(String workerId) throws MetaException;
+ 
+   /**
+    * This will mark an entry in the queue as compacted
+    * and put it in the ready to clean state.
+    * @param info info on the compaction entry to mark as compacted.
+    */
+   @RetrySemantics.SafeToRetry
+   void markCompacted(CompactionInfo info) throws MetaException;
+ 
+   /**
+    * Find entries in the queue that are ready to
+    * be cleaned.
+    * @return information on the entry in the queue.
+    */
+   @RetrySemantics.ReadOnly
+   List<CompactionInfo> findReadyToClean() throws MetaException;
+ 
+   /**
+    * This will remove an entry from the queue after
+    * it has been compacted.
+    * 
+    * @param info info on the compaction entry to remove
+    */
+   @RetrySemantics.CannotRetry
+   void markCleaned(CompactionInfo info) throws MetaException;
+ 
+   /**
+    * Mark a compaction entry as failed.  This will move it to the compaction history queue with a
+    * failed status.  It will NOT clean up aborted transactions in the table/partition associated
+    * with this compaction.
+    * @param info information on the compaction that failed.
+    * @throws MetaException
+    */
+   @RetrySemantics.CannotRetry
+   void markFailed(CompactionInfo info) throws MetaException;
+ 
+   /**
+    * Clean up entries from the TXN_TO_WRITE_ID table below min_uncommitted_txnid as found by
+    * min(NEXT_TXN_ID.ntxn_next, min(MIN_HISTORY_LEVEL.mhl_min_open_txnid), min(Aborted TXNS.txn_id)).
+    */
+   @RetrySemantics.SafeToRetry
+   void cleanTxnToWriteIdTable() throws MetaException;
+ 
+   /**
+    * Clean up aborted transactions from txns that have no components in txn_components.  The reason such
+    * txns exist can be that no work was done in this txn (e.g. Streaming opened a TransactionBatch and
+    * abandoned it without doing any work) or due to {@link #markCleaned(CompactionInfo)} being called.
+    */
+   @RetrySemantics.SafeToRetry
+   void cleanEmptyAbortedTxns() throws MetaException;
+ 
+   /**
+    * This will take all entries assigned to workers
+    * on a host and return them to the INITIATED state.  The initiator should use this at start up to
+    * clean entries from any workers that were in the middle of compacting when the metastore
+    * shut down.  It does not reset entries from worker threads on other hosts as those may still
+    * be working.
+    * @param hostname Name of this host.  It is assumed this prefixes the thread's worker id,
+    *                 so that a LIKE pattern such as hostname% will match the worker id.
+    */
+   @RetrySemantics.Idempotent
+   void revokeFromLocalWorkers(String hostname) throws MetaException;
+ 
+   /**
+    * This call will return all compaction queue
+    * entries that are assigned to a worker but have exceeded the timeout back to the INITIATED state.
+    * This should be called by the initiator on start up and occasionally when running to clean up
+    * after dead threads.  At start up {@link #revokeFromLocalWorkers(String)} should be called
+    * first.
+    * @param timeout number of milliseconds since start time that should elapse before a worker is
+    *                declared dead.
+    */
+   @RetrySemantics.Idempotent
+   void revokeTimedoutWorkers(long timeout) throws MetaException;
+ 
+   /**
+    * Queries metastore DB directly to find columns in the table which have statistics information.
+    * If {@code ci} includes partition info then per partition stats info is examined, otherwise
+    * table level stats are examined.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   List<String> findColumnsWithStats(CompactionInfo ci) throws MetaException;
+ 
+   /**
+    * Record the highest write id that the {@code ci} compaction job will pay attention to.
+    */
+   @RetrySemantics.Idempotent
+   void setCompactionHighestWriteId(CompactionInfo ci, long highestWriteId) throws MetaException;
+ 
+   /**
+    * For any given compactable entity (partition, table if not partitioned) the history of compactions
+    * may look like "sssfffaaasffss", for example.  The idea is to retain the tail (most recent) of the
+    * history such that a configurable number of each type of state is present.  Any other entries
+    * can be purged.  This scheme has the advantage of always retaining the last failure/success even
+    * if it is not recent.
+    * @throws MetaException
+    */
+   @RetrySemantics.SafeToRetry
+   void purgeCompactionHistory() throws MetaException;
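+ 
+   // Illustration (not part of the patch): with a retention of two per state, only
+   // the two most recent 's', 'f' and 'a' entries of "sssfffaaasffss" survive a
+   // purge, so the last failure and the last success are always kept.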
+ 
+   /**
+    * WriteSet tracking is used to ensure proper transaction isolation.  This method deletes the 
+    * transaction metadata once it becomes unnecessary.  
+    */
+   @RetrySemantics.SafeToRetry
+   void performWriteSetGC();
+ 
+   /**
+    * Determine if there are enough consecutive failures compacting a table or partition that no
+    * new automatic compactions should be scheduled.  User initiated compactions do not do this
+    * check.
+    * @param ci  Table or partition to check.
+    * @return true if it is ok to compact, false if there have been too many failures.
+    * @throws MetaException
+    */
+   @RetrySemantics.ReadOnly
+   boolean checkFailedCompactions(CompactionInfo ci) throws MetaException;
+ 
+   @VisibleForTesting
+   int numLocksInLockTable() throws SQLException, MetaException;
+ 
+   @VisibleForTesting
+   long setTimeout(long milliseconds);
+ 
+   @RetrySemantics.Idempotent
+   MutexAPI getMutexAPI();
+ 
+   /**
+    * This is primarily designed to provide coarse grained mutex support to operations running
+    * inside the Metastore (of which there could be several instances).  The initial goal is to 
+    * ensure that various sub-processes of the Compactor don't step on each other.
+    * 
+    * In the RDBMS world each {@code LockHandle} uses a java.sql.Connection, so use it sparingly.
+    */
+   interface MutexAPI {
+     /**
+      * The {@code key} is the name of the lock. This will acquire an exclusive lock or block.  It
+      * returns a handle which must be used to release the lock.  Each invocation returns a new handle.
+      */
+     LockHandle acquireLock(String key) throws MetaException;
+ 
+     /**
+      * Same as {@link #acquireLock(String)} but takes an already existing handle as input.  This 
+      * will associate the lock on {@code key} with the same handle.  All locks associated with
+      * the same handle will be released together.
+      * @param handle not NULL
+      */
+     void acquireLock(String key, LockHandle handle) throws MetaException;
+     interface LockHandle {
+       /**
+        * Releases all locks associated with this handle.
+        */
+       void releaseLocks();
+     }
+   }
+ 
+   /**
+    * Once a {@link java.util.concurrent.ThreadPoolExecutor} Worker submits a job to the cluster,
+    * it calls this to update the metadata.
+    * @param id {@link CompactionInfo#id}
+    */
+   @RetrySemantics.Idempotent
+   void setHadoopJobId(String hadoopJobId, long id);
+ 
+   /**
+    * Add the ACID write event information to writeNotificationLog table.
+    * @param acidWriteEvent
+    */
+   @RetrySemantics.Idempotent
+   void addWriteNotificationLog(AcidWriteEvent acidWriteEvent) throws MetaException;
+ }
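
A minimal usage sketch of the MutexAPI declared above (illustrative only, not part
of the patch; it assumes a metastore Configuration whose TXN_STORE_IMPL points at a
concrete TxnStore implementation):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.api.MetaException;
import org.apache.hadoop.hive.metastore.txn.TxnStore;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class CleanerMutexSketch {
  // Runs work that must not execute concurrently on another metastore instance.
  static void runExclusively(Configuration conf) throws MetaException {
    TxnStore txnStore = TxnUtils.getTxnStore(conf);
    TxnStore.MutexAPI.LockHandle handle =
        txnStore.getMutexAPI().acquireLock(TxnStore.MUTEX_KEY.Cleaner.name());
    try {
      // ... the Cleaner's critical section goes here ...
    } finally {
      handle.releaseLocks();  // releases every lock associated with this handle
    }
  }
}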

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
index 0000000,fa291d5..aac5811
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/txn/TxnUtils.java
@@@ -1,0 -1,471 +1,481 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  * <p/>
+  * http://www.apache.org/licenses/LICENSE-2.0
+  * <p/>
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore.txn;
+ 
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.common.ValidCompactorWriteIdList;
+ import org.apache.hadoop.hive.common.ValidReaderWriteIdList;
+ import org.apache.hadoop.hive.common.ValidReadTxnList;
+ import org.apache.hadoop.hive.common.ValidTxnList;
+ import org.apache.hadoop.hive.common.ValidTxnWriteIdList;
+ import org.apache.hadoop.hive.common.ValidWriteIdList;
+ import org.apache.hadoop.hive.metastore.TransactionalValidationListener;
 -import org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse;
 -import org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse;
 -import org.apache.hadoop.hive.metastore.api.Table;
 -import org.apache.hadoop.hive.metastore.api.TableValidWriteIds;
 -import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
++import org.apache.hadoop.hive.metastore.api.*;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.apache.hadoop.hive.metastore.utils.JavaUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import java.util.Collections;
+ import java.util.ArrayList;
+ import java.util.Arrays;
+ import java.util.BitSet;
+ import java.util.List;
+ import java.util.Map;
+ 
+ public class TxnUtils {
+   private static final Logger LOG = LoggerFactory.getLogger(TxnUtils.class);
+ 
++  // Transactional stats states
++  static final public char STAT_OPEN = 'o';
++  static final public char STAT_INVALID = 'i';
++  static final public char STAT_COMMITTED = 'c';
++  static final public char STAT_OBSOLETE = 's';
++
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.GetOpenTxnsResponse} to a
+    * {@link org.apache.hadoop.hive.common.ValidTxnList}.  This assumes that the caller intends to
+    * read the files, and thus treats both open and aborted transactions as invalid.
+    * @param txns txn list from the metastore
+    * @param currentTxn Current transaction that the user has open.  If this is greater than 0 it
+    *                   will be removed from the exceptions list so that the user sees his own
+    *                   transaction as valid.
+    * @return a valid txn list.
+    */
+   public static ValidTxnList createValidReadTxnList(GetOpenTxnsResponse txns, long currentTxn) {
+     /*
+      * The highWaterMark should be min(currentTxn,txns.getTxn_high_water_mark()) assuming currentTxn>0
+      * otherwise if currentTxn=7 and 8 commits before 7, then 7 will see result of 8 which
+      * doesn't make sense for Snapshot Isolation. Of course for Read Committed, the list should
+      * include the latest committed set.
+      */
+     long highWaterMark = (currentTxn > 0) ? Math.min(currentTxn, txns.getTxn_high_water_mark())
+                                           : txns.getTxn_high_water_mark();
+ 
+     // Open txns are already sorted in ascending order. This list may or may not include the HWM,
+     // but it is guaranteed that the list won't have a txn > HWM. But if we overwrite the HWM with
+     // currentTxn, then we need to truncate the exceptions list accordingly.
+     List<Long> openTxns = txns.getOpen_txns();
+ 
+     // We care only about open/aborted txns below currentTxn, and hence the size of the exceptions
+     // list should be determined accordingly. currentTxn will be missing from the openTxns list only
+     // in a rare case, e.g. when the txn was aborted by AcidHouseKeeperService and the compactor has
+     // already cleaned up the aborted txns. In such cases, binarySearch returns a negative value that
+     // encodes the insertion point for currentTxn, so we negate it to get the size.
+     int sizeToHwm = (currentTxn > 0) ? Collections.binarySearch(openTxns, currentTxn) : openTxns.size();
+     sizeToHwm = (sizeToHwm < 0) ? (-sizeToHwm) : sizeToHwm;
+     long[] exceptions = new long[sizeToHwm];
+     BitSet inAbortedBits = BitSet.valueOf(txns.getAbortedBits());
+     BitSet outAbortedBits = new BitSet();
+     long minOpenTxnId = Long.MAX_VALUE;
+     int i = 0;
+     for (long txn : openTxns) {
+       // For snapshot isolation, we don't care about txns greater than current txn and so stop here.
+       // Also, we need not include current txn to exceptions list.
+       if ((currentTxn > 0) && (txn >= currentTxn)) {
+         break;
+       }
+       if (inAbortedBits.get(i)) {
+         outAbortedBits.set(i);
+       } else if (minOpenTxnId == Long.MAX_VALUE) {
+         minOpenTxnId = txn;
+       }
+       exceptions[i++] = txn;
+     }
+     return new ValidReadTxnList(exceptions, outAbortedBits, highWaterMark, minOpenTxnId);
+   }
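+ 
+   // Worked example (illustrative, not part of the patch): openTxns = [5, 7, 9],
+   // HWM = 10, currentTxn = 7.  binarySearch finds 7 at index 1, so the exceptions
+   // list is [5] and the returned high water mark is min(7, 10) = 7; txn 7
+   // therefore never sees writes from txns 8 and above.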
+ 
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.GetValidWriteIdsResponse} to a
+    * {@link org.apache.hadoop.hive.common.ValidTxnWriteIdList}.  This assumes that the caller intends to
+    * read the files, and thus treats both open and aborted transactions as invalid.
+    * @param currentTxnId current txn ID for which we get the valid write ids list
+    * @param list valid write ids list from the metastore
+    * @return a valid write IDs list for the whole transaction.
+    */
+   public static ValidTxnWriteIdList createValidTxnWriteIdList(Long currentTxnId,
+                                                               List<TableValidWriteIds> validIds) {
+     ValidTxnWriteIdList validTxnWriteIdList = new ValidTxnWriteIdList(currentTxnId);
+     for (TableValidWriteIds tableWriteIds : validIds) {
+       validTxnWriteIdList.addTableValidWriteIdList(createValidReaderWriteIdList(tableWriteIds));
+     }
+     return validTxnWriteIdList;
+   }
+ 
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.TableValidWriteIds} to a
+    * {@link org.apache.hadoop.hive.common.ValidReaderWriteIdList}.  This assumes that the caller intends to
+    * read the files, and thus treats both open and aborted write ids as invalid.
+    * @param tableWriteIds valid write ids for the given table from the metastore
+    * @return a valid write IDs list for the input table
+    */
+   public static ValidReaderWriteIdList createValidReaderWriteIdList(TableValidWriteIds tableWriteIds) {
+     String fullTableName = tableWriteIds.getFullTableName();
+     long highWater = tableWriteIds.getWriteIdHighWaterMark();
+     List<Long> invalids = tableWriteIds.getInvalidWriteIds();
+     BitSet abortedBits = BitSet.valueOf(tableWriteIds.getAbortedBits());
+     long[] exceptions = new long[invalids.size()];
+     int i = 0;
+     for (long writeId : invalids) {
+       exceptions[i++] = writeId;
+     }
+     if (tableWriteIds.isSetMinOpenWriteId()) {
+       return new ValidReaderWriteIdList(fullTableName, exceptions, abortedBits, highWater,
+                                         tableWriteIds.getMinOpenWriteId());
+     } else {
+       return new ValidReaderWriteIdList(fullTableName, exceptions, abortedBits, highWater);
+     }
+   }
+ 
+   /**
+    * Transform a {@link org.apache.hadoop.hive.metastore.api.TableValidWriteIds} to a
+    * {@link org.apache.hadoop.hive.common.ValidCompactorWriteIdList}.  This assumes that the caller intends to
+    * compact the files, and thus treats only open transactions/write ids as invalid.  Additionally any
+    * writeId &gt; highestOpenWriteId is also invalid.  This is to avoid creating something like
+    * delta_17_120 where writeId 80, for example, is still open.
+    * @param tableValidWriteIds table write id list from the metastore
+    * @return a valid write id list.
+    */
+   public static ValidCompactorWriteIdList createValidCompactWriteIdList(TableValidWriteIds tableValidWriteIds) {
+     String fullTableName = tableValidWriteIds.getFullTableName();
+     long highWater = tableValidWriteIds.getWriteIdHighWaterMark();
+     long minOpenWriteId = Long.MAX_VALUE;
+     List<Long> invalids = tableValidWriteIds.getInvalidWriteIds();
+     BitSet abortedBits = BitSet.valueOf(tableValidWriteIds.getAbortedBits());
+     long[] exceptions = new long[invalids.size()];
+     int i = 0;
+     for (long writeId : invalids) {
+       if (abortedBits.get(i)) {
+         // Only need aborted since we don't consider anything above minOpenWriteId
+         exceptions[i++] = writeId;
+       } else {
+         minOpenWriteId = Math.min(minOpenWriteId, writeId);
+       }
+     }
+     if(i < exceptions.length) {
+       exceptions = Arrays.copyOf(exceptions, i);
+     }
+     highWater = minOpenWriteId == Long.MAX_VALUE ? highWater : minOpenWriteId - 1;
+     BitSet bitSet = new BitSet(exceptions.length);
+     bitSet.set(0, exceptions.length); // for ValidCompactorWriteIdList, everything in exceptions are aborted
+     if (minOpenWriteId == Long.MAX_VALUE) {
+       return new ValidCompactorWriteIdList(fullTableName, exceptions, bitSet, highWater);
+     } else {
+       return new ValidCompactorWriteIdList(fullTableName, exceptions, bitSet, highWater, minOpenWriteId);
+     }
+   }
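+ 
+   // Worked example (illustrative, not part of the patch): invalids = [5, 6] with
+   // writeId 5 aborted and 6 open, highWater = 10.  exceptions becomes [5] and the
+   // high water mark is lowered to minOpenWriteId - 1 = 5, so the resulting
+   // compaction can never span the still-open writeId 6.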
+ 
+   public static ValidReaderWriteIdList updateForCompactionQuery(ValidReaderWriteIdList ids) {
+     // This is based on the existing valid write ID list that was built for a select query;
+     // therefore we assume all the aborted txns, etc. were already accounted for.
+     // All we do is adjust the high watermark to only include contiguous txns.
+     Long minOpenWriteId = ids.getMinOpenWriteId();
+     if (minOpenWriteId != null && minOpenWriteId != Long.MAX_VALUE) {
+       return ids.updateHighWatermark(ids.getMinOpenWriteId() - 1);
+     }
+     return ids;
+   }
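+ 
+   // Illustration (not part of the patch): if minOpenWriteId = 17, the high
+   // watermark is rewound to 16 so the compaction query only sees a contiguous
+   // prefix of write ids.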
+ 
+   /**
+    * Get an instance of the TxnStore that is appropriate for this store
+    * @param conf configuration
+    * @return txn store
+    */
+   public static TxnStore getTxnStore(Configuration conf) {
+     String className = MetastoreConf.getVar(conf, ConfVars.TXN_STORE_IMPL);
+     try {
+       TxnStore handler = JavaUtils.getClass(className, TxnStore.class).newInstance();
+       handler.setConf(conf);
+       return handler;
+     } catch (Exception e) {
+       LOG.error("Unable to instantiate raw store directly in fastpath mode", e);
+       throw new RuntimeException(e);
+     }
+   }
+ 
+   /**
+    * Note, users are responsible for using the correct TxnManager. We do not look at
+    * SessionState.get().getTxnMgr().supportsAcid() here
+    * Should produce the same result as
+    * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isTransactionalTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+    * @return true if table is a transactional table, false otherwise
+    */
+   public static boolean isTransactionalTable(Table table) {
+     if (table == null) {
+       return false;
+     }
+     Map<String, String> parameters = table.getParameters();
+     String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
++    return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
++  }
++
++  public static boolean isTransactionalTable(Map<String, String> parameters) {
++    if (parameters == null) {
++      return false;
++    }
++    String tableIsTransactional = parameters.get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL);
+     return tableIsTransactional != null && tableIsTransactional.equalsIgnoreCase("true");
+   }
+ 
+   /**
+    * Should produce the same result as
+    * {@link org.apache.hadoop.hive.ql.io.AcidUtils#isAcidTable(org.apache.hadoop.hive.ql.metadata.Table)}.
+    */
+   public static boolean isAcidTable(Table table) {
+     return TxnUtils.isTransactionalTable(table) &&
+       TransactionalValidationListener.DEFAULT_TRANSACTIONAL_PROPERTY.equals(table.getParameters()
+       .get(hive_metastoreConstants.TABLE_TRANSACTIONAL_PROPERTIES));
+   }
+ 
+   /**
+    * Should produce the result as {@code <dbName>.<tableName>}.
+    */
+   public static String getFullTableName(String dbName, String tableName) {
+     return dbName.toLowerCase() + "." + tableName.toLowerCase();
+   }
+ 
+   public static String[] getDbTableName(String fullTableName) {
+     return fullTableName.split("\\.");
+   }
+ 
+ 
+ 
+   /**
+    * Build a query, with one or multiple 'IN' or 'NOT IN' clauses, from the given input
+    * parameters.  Multiple queries are generated if one query would be too big, but only
+    * for the case of a composite 'IN' clause: for 'NOT IN' clauses, splitting into
+    * multiple queries would change the semantics of the intended query.
+    * E.g., assume that the input "inList" parameter has [5, 6] and that the
+    * _DIRECT_SQL_MAX_QUERY_LENGTH_ configuration parameter only allows one value in a 'NOT IN' clause.
+    * Then having two delete statements changes the semantics of the intended SQL statement,
+    * i.e. the sequence 'delete from T where a not in (5)' and 'delete from T where a not in (6)'
+    * is not equal to 'delete from T where a not in (5, 6)'.
+    *
+    * Note that this method currently supports only a single column for the
+    * IN/NOT IN clause, and that it only covers the OR-based composite 'IN' clause and the
+    * AND-based composite 'NOT IN' clause.
+    * For example, for the 'IN' clause case, the method will build a query with OR,
+    * e.g. "id in (1,2,3) OR id in (4,5,6)".
+    * For the 'NOT IN' case, the NOT IN list is broken into multiple 'NOT IN' clauses connected by AND.
+    *
+    * Note that, in this method, "a composite 'IN' clause" is defined as "a list of multiple 'IN'
+    * clauses in a query".
+    *
+    * @param queries   OUT: Array of query strings
+    * @param prefix    IN:  Part of the query that comes before IN list
+    * @param suffix    IN:  Part of the query that comes after IN list
+    * @param inList    IN:  the list with IN list values
+    * @param inColumn  IN:  single column name of IN list operator
+    * @param addParens IN:  add a pair of parentheses outside the IN lists
+    *                       e.g. "(id in (1,2,3) OR id in (4,5,6))"
+    * @param notIn     IN:  is this for building a 'NOT IN' composite clause?
+    * @return          OUT: a list of the count of IN list values that are in each of the corresponding queries
+    */
+   public static List<Integer> buildQueryWithINClause(Configuration conf,
+                                             List<String> queries,
+                                             StringBuilder prefix,
+                                             StringBuilder suffix,
+                                             List<Long> inList,
+                                             String inColumn,
+                                             boolean addParens,
+                                             boolean notIn) {
+     List<String> inListStrings = new ArrayList<>(inList.size());
+     for (Long aLong : inList) {
+       inListStrings.add(aLong.toString());
+     }
+     return buildQueryWithINClauseStrings(conf, queries, prefix, suffix,
+         inListStrings, inColumn, addParens, notIn);
+ 
+   }
+   /**
+    * Build a query, with one or multiple 'IN' or 'NOT IN' clauses, from the given input
+    * parameters.  Multiple queries are generated if one query would be too big, but only
+    * for the case of a composite 'IN' clause: for 'NOT IN' clauses, splitting into
+    * multiple queries would change the semantics of the intended query.
+    * E.g., assume that the input "inList" parameter has [5, 6] and that the
+    * _DIRECT_SQL_MAX_QUERY_LENGTH_ configuration parameter only allows one value in a 'NOT IN' clause.
+    * Then having two delete statements changes the semantics of the intended SQL statement,
+    * i.e. the sequence 'delete from T where a not in (5)' and 'delete from T where a not in (6)'
+    * is not equal to 'delete from T where a not in (5, 6)'.
+    *
+    * Note that this method currently supports only a single column for the
+    * IN/NOT IN clause, and that it only covers the OR-based composite 'IN' clause and the
+    * AND-based composite 'NOT IN' clause.
+    * For example, for the 'IN' clause case, the method will build a query with OR,
+    * e.g. "id in (1,2,3) OR id in (4,5,6)".
+    * For the 'NOT IN' case, the NOT IN list is broken into multiple 'NOT IN' clauses connected by AND.
+    *
+    * Note that, in this method, "a composite 'IN' clause" is defined as "a list of multiple 'IN'
+    * clauses in a query".
+    *
+    * @param queries   OUT: Array of query strings
+    * @param prefix    IN:  Part of the query that comes before IN list
+    * @param suffix    IN:  Part of the query that comes after IN list
+    * @param inList    IN:  the list with IN list values
+    * @param inColumn  IN:  single column name of IN list operator
+    * @param addParens IN:  add a pair of parentheses outside the IN lists
+    *                       e.g. "(id in (1,2,3) OR id in (4,5,6))"
+    * @param notIn     IN:  is this for building a 'NOT IN' composite clause?
+    * @return          OUT: a list of the count of IN list values that are in each of the corresponding queries
+    */
+   public static List<Integer> buildQueryWithINClauseStrings(Configuration conf, List<String> queries, StringBuilder prefix,
+       StringBuilder suffix, List<String> inList, String inColumn, boolean addParens, boolean notIn) {
+     // Get configuration parameters
+     int maxQueryLength = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH);
+     int batchSize = MetastoreConf.getIntVar(conf, ConfVars.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE);
+ 
+     // Validate the parameters, since this is a public method.
+     if (inList == null || inList.size() == 0 || maxQueryLength <= 0 || batchSize <= 0) {
+       throw new IllegalArgumentException("The IN list is empty or the query size limits are invalid!");
+     }
+ 
+     // Define constants and local variables.
+     int inListSize = inList.size();
+     StringBuilder buf = new StringBuilder();
+ 
+     int cursor4InListArray = 0,  // cursor for the "inList" array.
+         cursor4InClauseElements = 0,  // cursor for an element list per an 'IN'/'NOT IN'-clause.
+         cursor4queryOfInClauses = 0;  // cursor for in-clause lists per a query.
+     boolean nextItemNeeded = true;
+     boolean newInclausePrefixJustAppended = false;
+     StringBuilder nextValue = new StringBuilder("");
+     StringBuilder newInclausePrefix =
+       new StringBuilder(notIn ? " and " + inColumn + " not in (" :
+                                 " or " + inColumn + " in (");
+     List<Integer> ret = new ArrayList<>();
+     int currentCount = 0;
+ 
+     // Loop over the given inList elements.
+     while( cursor4InListArray < inListSize || !nextItemNeeded) {
+       if (cursor4queryOfInClauses == 0) {
+         // Append prefix
+         buf.append(prefix);
+         if (addParens) {
+           buf.append("(");
+         }
+         buf.append(inColumn);
+ 
+         if (notIn) {
+           buf.append(" not in (");
+         } else {
+           buf.append(" in (");
+         }
+         cursor4queryOfInClauses++;
+         newInclausePrefixJustAppended = false;
+       }
+ 
+       // Get the next "inList" value element if needed.
+       if (nextItemNeeded) {
+         nextValue.setLength(0);
+         nextValue.append(String.valueOf(inList.get(cursor4InListArray++)));
+         nextItemNeeded = false;
+       }
+ 
+       // Compute the size of a query when the 'nextValue' is added to the current query.
+       int querySize = querySizeExpected(buf.length(), nextValue.length(), suffix.length(), addParens);
+ 
+       if (querySize > maxQueryLength * 1024) {
+         // Check an edge case where the DIRECT_SQL_MAX_QUERY_LENGTH does not allow one 'IN' clause with a single value.
+         if (cursor4queryOfInClauses == 1 && cursor4InClauseElements == 0) {
+           throw new IllegalArgumentException("The current " + ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH.getVarname() + " is set too small to have one IN clause with single value!");
+         }
+ 
+         // Check an edge case to throw an exception if we cannot build a single query for the 'NOT IN' clause cases mentioned in the method comments.
+         if (notIn) {
+           throw new IllegalArgumentException("The NOT IN list has too many elements for the current " + ConfVars.DIRECT_SQL_MAX_QUERY_LENGTH.getVarname() + "!");
+         }
+ 
+         // Wrap up the current query string since we cannot add another "inList" element value.
+         if (newInclausePrefixJustAppended) {
+           buf.delete(buf.length()-newInclausePrefix.length(), buf.length());
+         }
+ 
+         buf.setCharAt(buf.length() - 1, ')'); // replace the trailing comma to finish an 'IN' clause string.
+ 
+         if (addParens) {
+           buf.append(")");
+         }
+ 
+         buf.append(suffix);
+         queries.add(buf.toString());
+         ret.add(currentCount);
+ 
+         // Prepare a new query string.
+         buf.setLength(0);
+         currentCount = 0;
+         cursor4queryOfInClauses = cursor4InClauseElements = 0;
+         querySize = 0;
+         newInclausePrefixJustAppended = false;
+         continue;
+       } else if (cursor4InClauseElements >= batchSize-1 && cursor4InClauseElements != 0) {
+         // Finish the current 'IN'/'NOT IN' clause and start a new clause.
+         buf.setCharAt(buf.length() - 1, ')'); // replace the trailing comma.
+         buf.append(newInclausePrefix.toString());
+ 
+         newInclausePrefixJustAppended = true;
+ 
+         // increment cursor for per-query IN-clause list
+         cursor4queryOfInClauses++;
+         cursor4InClauseElements = 0;
+       } else {
+         buf.append(nextValue.toString()).append(",");
+         currentCount++;
+         nextItemNeeded = true;
+         newInclausePrefixJustAppended = false;
+         // increment cursor for elements per 'IN'/'NOT IN' clause.
+         cursor4InClauseElements++;
+       }
+     }
+ 
+     // Finish the last query.
+     if (newInclausePrefixJustAppended) {
+       buf.delete(buf.length() - newInclausePrefix.length(), buf.length());
+     }
+     buf.setCharAt(buf.length() - 1, ')'); // replace the trailing comma.
+     if (addParens) {
+       buf.append(")");
+     }
+     buf.append(suffix);
+     queries.add(buf.toString());
+     ret.add(currentCount);
+     return ret;
+   }
+ 
+   /**
+    * Compute and return the size of a query statement with the given parameters as input variables.
+    *
+    * @param sizeSoFar     size of the current contents of the buf
+    * @param sizeNextItem  size of the next 'IN' clause element value
+    * @param suffixSize    size of the suffix for a query statement
+    * @param addParens     whether an additional closing parenthesis is added
+    */
+   private static int querySizeExpected(int sizeSoFar,
+                                        int sizeNextItem,
+                                        int suffixSize,
+                                        boolean addParens) {
+ 
+     int size = sizeSoFar + sizeNextItem + suffixSize;
+ 
+     if (addParens) {
+        size++;
+     }
+ 
+     return size;
+   }
+ }
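
A minimal usage sketch of buildQueryWithINClause (illustrative only, not part of
the patch; the table and column names are made up, and conf is assumed to be a
metastore Configuration):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.metastore.txn.TxnUtils;

public class InClauseSketch {
  // Splits one logical DELETE over a long id list into size-limited queries.
  static List<String> buildDeletes(Configuration conf, List<Long> txnIds) {
    List<String> queries = new ArrayList<>();
    StringBuilder prefix = new StringBuilder("delete from TXNS where ");
    StringBuilder suffix = new StringBuilder();
    // counts.get(i) is how many IN-list values landed in queries.get(i).
    List<Integer> counts = TxnUtils.buildQueryWithINClause(conf, queries,
        prefix, suffix, txnIds, "txn_id", false /* addParens */, false /* notIn */);
    return queries;  // e.g. ["delete from TXNS where txn_id in (1,2,3)"]
  }
}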


[46/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
new file mode 100644
index 0000000..7f06b3b
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -0,0 +1,33217 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#include "hive_metastore_types.h"
+
+#include <algorithm>
+#include <ostream>
+
+#include <thrift/TToString.h>
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+int _kHiveObjectTypeValues[] = {
+  HiveObjectType::GLOBAL,
+  HiveObjectType::DATABASE,
+  HiveObjectType::TABLE,
+  HiveObjectType::PARTITION,
+  HiveObjectType::COLUMN
+};
+const char* _kHiveObjectTypeNames[] = {
+  "GLOBAL",
+  "DATABASE",
+  "TABLE",
+  "PARTITION",
+  "COLUMN"
+};
+const std::map<int, const char*> _HiveObjectType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(5, _kHiveObjectTypeValues, _kHiveObjectTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kPrincipalTypeValues[] = {
+  PrincipalType::USER,
+  PrincipalType::ROLE,
+  PrincipalType::GROUP
+};
+const char* _kPrincipalTypeNames[] = {
+  "USER",
+  "ROLE",
+  "GROUP"
+};
+const std::map<int, const char*> _PrincipalType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kPrincipalTypeValues, _kPrincipalTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kPartitionEventTypeValues[] = {
+  PartitionEventType::LOAD_DONE
+};
+const char* _kPartitionEventTypeNames[] = {
+  "LOAD_DONE"
+};
+const std::map<int, const char*> _PartitionEventType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(1, _kPartitionEventTypeValues, _kPartitionEventTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kTxnStateValues[] = {
+  TxnState::COMMITTED,
+  TxnState::ABORTED,
+  TxnState::OPEN
+};
+const char* _kTxnStateNames[] = {
+  "COMMITTED",
+  "ABORTED",
+  "OPEN"
+};
+const std::map<int, const char*> _TxnState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kTxnStateValues, _kTxnStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kLockLevelValues[] = {
+  LockLevel::DB,
+  LockLevel::TABLE,
+  LockLevel::PARTITION
+};
+const char* _kLockLevelNames[] = {
+  "DB",
+  "TABLE",
+  "PARTITION"
+};
+const std::map<int, const char*> _LockLevel_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kLockLevelValues, _kLockLevelNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kLockStateValues[] = {
+  LockState::ACQUIRED,
+  LockState::WAITING,
+  LockState::ABORT,
+  LockState::NOT_ACQUIRED
+};
+const char* _kLockStateNames[] = {
+  "ACQUIRED",
+  "WAITING",
+  "ABORT",
+  "NOT_ACQUIRED"
+};
+const std::map<int, const char*> _LockState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(4, _kLockStateValues, _kLockStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kLockTypeValues[] = {
+  LockType::SHARED_READ,
+  LockType::SHARED_WRITE,
+  LockType::EXCLUSIVE
+};
+const char* _kLockTypeNames[] = {
+  "SHARED_READ",
+  "SHARED_WRITE",
+  "EXCLUSIVE"
+};
+const std::map<int, const char*> _LockType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kLockTypeValues, _kLockTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kCompactionTypeValues[] = {
+  CompactionType::MINOR,
+  CompactionType::MAJOR
+};
+const char* _kCompactionTypeNames[] = {
+  "MINOR",
+  "MAJOR"
+};
+const std::map<int, const char*> _CompactionType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kCompactionTypeValues, _kCompactionTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kGrantRevokeTypeValues[] = {
+  GrantRevokeType::GRANT,
+  GrantRevokeType::REVOKE
+};
+const char* _kGrantRevokeTypeNames[] = {
+  "GRANT",
+  "REVOKE"
+};
+const std::map<int, const char*> _GrantRevokeType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kGrantRevokeTypeValues, _kGrantRevokeTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kDataOperationTypeValues[] = {
+  DataOperationType::SELECT,
+  DataOperationType::INSERT,
+  DataOperationType::UPDATE,
+  DataOperationType::DELETE,
+  DataOperationType::UNSET,
+  DataOperationType::NO_TXN
+};
+const char* _kDataOperationTypeNames[] = {
+  "SELECT",
+  "INSERT",
+  "UPDATE",
+  "DELETE",
+  "UNSET",
+  "NO_TXN"
+};
+const std::map<int, const char*> _DataOperationType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(6, _kDataOperationTypeValues, _kDataOperationTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kEventRequestTypeValues[] = {
+  EventRequestType::INSERT,
+  EventRequestType::UPDATE,
+  EventRequestType::DELETE
+};
+const char* _kEventRequestTypeNames[] = {
+  "INSERT",
+  "UPDATE",
+  "DELETE"
+};
+const std::map<int, const char*> _EventRequestType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kEventRequestTypeValues, _kEventRequestTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kSerdeTypeValues[] = {
+  SerdeType::HIVE,
+  SerdeType::SCHEMA_REGISTRY
+};
+const char* _kSerdeTypeNames[] = {
+  "HIVE",
+  "SCHEMA_REGISTRY"
+};
+const std::map<int, const char*> _SerdeType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kSerdeTypeValues, _kSerdeTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kSchemaTypeValues[] = {
+  SchemaType::HIVE,
+  SchemaType::AVRO
+};
+const char* _kSchemaTypeNames[] = {
+  "HIVE",
+  "AVRO"
+};
+const std::map<int, const char*> _SchemaType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kSchemaTypeValues, _kSchemaTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kSchemaCompatibilityValues[] = {
+  SchemaCompatibility::NONE,
+  SchemaCompatibility::BACKWARD,
+  SchemaCompatibility::FORWARD,
+  SchemaCompatibility::BOTH
+};
+const char* _kSchemaCompatibilityNames[] = {
+  "NONE",
+  "BACKWARD",
+  "FORWARD",
+  "BOTH"
+};
+const std::map<int, const char*> _SchemaCompatibility_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(4, _kSchemaCompatibilityValues, _kSchemaCompatibilityNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kSchemaValidationValues[] = {
+  SchemaValidation::LATEST,
+  SchemaValidation::ALL
+};
+const char* _kSchemaValidationNames[] = {
+  "LATEST",
+  "ALL"
+};
+const std::map<int, const char*> _SchemaValidation_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kSchemaValidationValues, _kSchemaValidationNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kSchemaVersionStateValues[] = {
+  SchemaVersionState::INITIATED,
+  SchemaVersionState::START_REVIEW,
+  SchemaVersionState::CHANGES_REQUIRED,
+  SchemaVersionState::REVIEWED,
+  SchemaVersionState::ENABLED,
+  SchemaVersionState::DISABLED,
+  SchemaVersionState::ARCHIVED,
+  SchemaVersionState::DELETED
+};
+const char* _kSchemaVersionStateNames[] = {
+  "INITIATED",
+  "START_REVIEW",
+  "CHANGES_REQUIRED",
+  "REVIEWED",
+  "ENABLED",
+  "DISABLED",
+  "ARCHIVED",
+  "DELETED"
+};
+const std::map<int, const char*> _SchemaVersionState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(8, _kSchemaVersionStateValues, _kSchemaVersionStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kFunctionTypeValues[] = {
+  FunctionType::JAVA
+};
+const char* _kFunctionTypeNames[] = {
+  "JAVA"
+};
+const std::map<int, const char*> _FunctionType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(1, _kFunctionTypeValues, _kFunctionTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kResourceTypeValues[] = {
+  ResourceType::JAR,
+  ResourceType::FILE,
+  ResourceType::ARCHIVE
+};
+const char* _kResourceTypeNames[] = {
+  "JAR",
+  "FILE",
+  "ARCHIVE"
+};
+const std::map<int, const char*> _ResourceType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kResourceTypeValues, _kResourceTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kFileMetadataExprTypeValues[] = {
+  FileMetadataExprType::ORC_SARG
+};
+const char* _kFileMetadataExprTypeNames[] = {
+  "ORC_SARG"
+};
+const std::map<int, const char*> _FileMetadataExprType_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(1, _kFileMetadataExprTypeValues, _kFileMetadataExprTypeNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kClientCapabilityValues[] = {
+  ClientCapability::TEST_CAPABILITY,
+  ClientCapability::INSERT_ONLY_TABLES
+};
+const char* _kClientCapabilityNames[] = {
+  "TEST_CAPABILITY",
+  "INSERT_ONLY_TABLES"
+};
+const std::map<int, const char*> _ClientCapability_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kClientCapabilityValues, _kClientCapabilityNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kWMResourcePlanStatusValues[] = {
+  WMResourcePlanStatus::ACTIVE,
+  WMResourcePlanStatus::ENABLED,
+  WMResourcePlanStatus::DISABLED
+};
+const char* _kWMResourcePlanStatusNames[] = {
+  "ACTIVE",
+  "ENABLED",
+  "DISABLED"
+};
+const std::map<int, const char*> _WMResourcePlanStatus_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(3, _kWMResourcePlanStatusValues, _kWMResourcePlanStatusNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+int _kWMPoolSchedulingPolicyValues[] = {
+  WMPoolSchedulingPolicy::FAIR,
+  WMPoolSchedulingPolicy::FIFO
+};
+const char* _kWMPoolSchedulingPolicyNames[] = {
+  "FAIR",
+  "FIFO"
+};
+const std::map<int, const char*> _WMPoolSchedulingPolicy_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kWMPoolSchedulingPolicyValues, _kWMPoolSchedulingPolicyNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL));
+
+
+Version::~Version() throw() {
+}
+
+
+void Version::__set_version(const std::string& val) {
+  this->version = val;
+}
+
+void Version::__set_comments(const std::string& val) {
+  this->comments = val;
+}
+
+uint32_t Version::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->version);
+          this->__isset.version = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->comments);
+          this->__isset.comments = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t Version::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("Version");
+
+  xfer += oprot->writeFieldBegin("version", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->version);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("comments", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->comments);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(Version &a, Version &b) {
+  using ::std::swap;
+  swap(a.version, b.version);
+  swap(a.comments, b.comments);
+  swap(a.__isset, b.__isset);
+}
+
+Version::Version(const Version& other0) {
+  version = other0.version;
+  comments = other0.comments;
+  __isset = other0.__isset;
+}
+Version& Version::operator=(const Version& other1) {
+  version = other1.version;
+  comments = other1.comments;
+  __isset = other1.__isset;
+  return *this;
+}
+void Version::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "Version(";
+  out << "version=" << to_string(version);
+  out << ", " << "comments=" << to_string(comments);
+  out << ")";
+}
+
+
+FieldSchema::~FieldSchema() throw() {
+}
+
+
+void FieldSchema::__set_name(const std::string& val) {
+  this->name = val;
+}
+
+void FieldSchema::__set_type(const std::string& val) {
+  this->type = val;
+}
+
+void FieldSchema::__set_comment(const std::string& val) {
+  this->comment = val;
+}
+
+uint32_t FieldSchema::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->name);
+          this->__isset.name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->type);
+          this->__isset.type = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->comment);
+          this->__isset.comment = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t FieldSchema::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("FieldSchema");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->type);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("comment", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->comment);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(FieldSchema &a, FieldSchema &b) {
+  using ::std::swap;
+  swap(a.name, b.name);
+  swap(a.type, b.type);
+  swap(a.comment, b.comment);
+  swap(a.__isset, b.__isset);
+}
+
+FieldSchema::FieldSchema(const FieldSchema& other2) {
+  name = other2.name;
+  type = other2.type;
+  comment = other2.comment;
+  __isset = other2.__isset;
+}
+FieldSchema& FieldSchema::operator=(const FieldSchema& other3) {
+  name = other3.name;
+  type = other3.type;
+  comment = other3.comment;
+  __isset = other3.__isset;
+  return *this;
+}
+void FieldSchema::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "FieldSchema(";
+  out << "name=" << to_string(name);
+  out << ", " << "type=" << to_string(type);
+  out << ", " << "comment=" << to_string(comment);
+  out << ")";
+}
+
+
+SQLPrimaryKey::~SQLPrimaryKey() throw() {
+}
+
+
+void SQLPrimaryKey::__set_table_db(const std::string& val) {
+  this->table_db = val;
+}
+
+void SQLPrimaryKey::__set_table_name(const std::string& val) {
+  this->table_name = val;
+}
+
+void SQLPrimaryKey::__set_column_name(const std::string& val) {
+  this->column_name = val;
+}
+
+void SQLPrimaryKey::__set_key_seq(const int32_t val) {
+  this->key_seq = val;
+}
+
+void SQLPrimaryKey::__set_pk_name(const std::string& val) {
+  this->pk_name = val;
+}
+
+void SQLPrimaryKey::__set_enable_cstr(const bool val) {
+  this->enable_cstr = val;
+}
+
+void SQLPrimaryKey::__set_validate_cstr(const bool val) {
+  this->validate_cstr = val;
+}
+
+void SQLPrimaryKey::__set_rely_cstr(const bool val) {
+  this->rely_cstr = val;
+}
+
+void SQLPrimaryKey::__set_catName(const std::string& val) {
+  this->catName = val;
+  __isset.catName = true;
+}
+
+uint32_t SQLPrimaryKey::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_db);
+          this->__isset.table_db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_name);
+          this->__isset.table_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->column_name);
+          this->__isset.column_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->key_seq);
+          this->__isset.key_seq = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->pk_name);
+          this->__isset.pk_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->enable_cstr);
+          this->__isset.enable_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 7:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->validate_cstr);
+          this->__isset.validate_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 8:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->rely_cstr);
+          this->__isset.rely_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 9:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catName);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t SQLPrimaryKey::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("SQLPrimaryKey");
+
+  xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->table_db);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->table_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->column_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 4);
+  xfer += oprot->writeI32(this->key_seq);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("pk_name", ::apache::thrift::protocol::T_STRING, 5);
+  xfer += oprot->writeString(this->pk_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6);
+  xfer += oprot->writeBool(this->enable_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 7);
+  xfer += oprot->writeBool(this->validate_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8);
+  xfer += oprot->writeBool(this->rely_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  if (this->__isset.catName) {
+    xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 9);
+    xfer += oprot->writeString(this->catName);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(SQLPrimaryKey &a, SQLPrimaryKey &b) {
+  using ::std::swap;
+  swap(a.table_db, b.table_db);
+  swap(a.table_name, b.table_name);
+  swap(a.column_name, b.column_name);
+  swap(a.key_seq, b.key_seq);
+  swap(a.pk_name, b.pk_name);
+  swap(a.enable_cstr, b.enable_cstr);
+  swap(a.validate_cstr, b.validate_cstr);
+  swap(a.rely_cstr, b.rely_cstr);
+  swap(a.catName, b.catName);
+  swap(a.__isset, b.__isset);
+}
+
+SQLPrimaryKey::SQLPrimaryKey(const SQLPrimaryKey& other4) {
+  table_db = other4.table_db;
+  table_name = other4.table_name;
+  column_name = other4.column_name;
+  key_seq = other4.key_seq;
+  pk_name = other4.pk_name;
+  enable_cstr = other4.enable_cstr;
+  validate_cstr = other4.validate_cstr;
+  rely_cstr = other4.rely_cstr;
+  catName = other4.catName;
+  __isset = other4.__isset;
+}
+SQLPrimaryKey& SQLPrimaryKey::operator=(const SQLPrimaryKey& other5) {
+  table_db = other5.table_db;
+  table_name = other5.table_name;
+  column_name = other5.column_name;
+  key_seq = other5.key_seq;
+  pk_name = other5.pk_name;
+  enable_cstr = other5.enable_cstr;
+  validate_cstr = other5.validate_cstr;
+  rely_cstr = other5.rely_cstr;
+  catName = other5.catName;
+  __isset = other5.__isset;
+  return *this;
+}
+void SQLPrimaryKey::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "SQLPrimaryKey(";
+  out << "table_db=" << to_string(table_db);
+  out << ", " << "table_name=" << to_string(table_name);
+  out << ", " << "column_name=" << to_string(column_name);
+  out << ", " << "key_seq=" << to_string(key_seq);
+  out << ", " << "pk_name=" << to_string(pk_name);
+  out << ", " << "enable_cstr=" << to_string(enable_cstr);
+  out << ", " << "validate_cstr=" << to_string(validate_cstr);
+  out << ", " << "rely_cstr=" << to_string(rely_cstr);
+  out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>"));
+  out << ")";
+}
+
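SQLPrimaryKey is the first struct here with an optional field: write() only emits catName (field id 9) when __isset.catName is true, which is how the wire format stays compatible with readers that predate catalogs. Note that __set_catName() is the one setter above that also flips its __isset bit. A short sketch using only the generated API above:

    SQLPrimaryKey pk;
    pk.__set_table_db("default");
    pk.__set_table_name("t");
    pk.__set_column_name("id");
    pk.__set_key_seq(1);
    pk.__set_pk_name("pk_t_id");
    // Without the next call, write() skips field 9 entirely, so an older
    // reader never encounters the unknown catName field.
    pk.__set_catName("hive");  // sets __isset.catName; field 9 is now emitted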
+
+SQLForeignKey::~SQLForeignKey() throw() {
+}
+
+
+void SQLForeignKey::__set_pktable_db(const std::string& val) {
+  this->pktable_db = val;
+}
+
+void SQLForeignKey::__set_pktable_name(const std::string& val) {
+  this->pktable_name = val;
+}
+
+void SQLForeignKey::__set_pkcolumn_name(const std::string& val) {
+  this->pkcolumn_name = val;
+}
+
+void SQLForeignKey::__set_fktable_db(const std::string& val) {
+  this->fktable_db = val;
+}
+
+void SQLForeignKey::__set_fktable_name(const std::string& val) {
+  this->fktable_name = val;
+}
+
+void SQLForeignKey::__set_fkcolumn_name(const std::string& val) {
+  this->fkcolumn_name = val;
+}
+
+void SQLForeignKey::__set_key_seq(const int32_t val) {
+  this->key_seq = val;
+}
+
+void SQLForeignKey::__set_update_rule(const int32_t val) {
+  this->update_rule = val;
+}
+
+void SQLForeignKey::__set_delete_rule(const int32_t val) {
+  this->delete_rule = val;
+}
+
+void SQLForeignKey::__set_fk_name(const std::string& val) {
+  this->fk_name = val;
+}
+
+void SQLForeignKey::__set_pk_name(const std::string& val) {
+  this->pk_name = val;
+}
+
+void SQLForeignKey::__set_enable_cstr(const bool val) {
+  this->enable_cstr = val;
+}
+
+void SQLForeignKey::__set_validate_cstr(const bool val) {
+  this->validate_cstr = val;
+}
+
+void SQLForeignKey::__set_rely_cstr(const bool val) {
+  this->rely_cstr = val;
+}
+
+void SQLForeignKey::__set_catName(const std::string& val) {
+  this->catName = val;
+  __isset.catName = true;
+}
+
+uint32_t SQLForeignKey::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->pktable_db);
+          this->__isset.pktable_db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->pktable_name);
+          this->__isset.pktable_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->pkcolumn_name);
+          this->__isset.pkcolumn_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->fktable_db);
+          this->__isset.fktable_db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->fktable_name);
+          this->__isset.fktable_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->fkcolumn_name);
+          this->__isset.fkcolumn_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 7:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->key_seq);
+          this->__isset.key_seq = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 8:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->update_rule);
+          this->__isset.update_rule = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 9:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->delete_rule);
+          this->__isset.delete_rule = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 10:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->fk_name);
+          this->__isset.fk_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 11:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->pk_name);
+          this->__isset.pk_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 12:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->enable_cstr);
+          this->__isset.enable_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 13:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->validate_cstr);
+          this->__isset.validate_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 14:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->rely_cstr);
+          this->__isset.rely_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 15:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catName);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t SQLForeignKey::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("SQLForeignKey");
+
+  xfer += oprot->writeFieldBegin("pktable_db", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->pktable_db);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("pktable_name", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->pktable_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("pkcolumn_name", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->pkcolumn_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("fktable_db", ::apache::thrift::protocol::T_STRING, 4);
+  xfer += oprot->writeString(this->fktable_db);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("fktable_name", ::apache::thrift::protocol::T_STRING, 5);
+  xfer += oprot->writeString(this->fktable_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("fkcolumn_name", ::apache::thrift::protocol::T_STRING, 6);
+  xfer += oprot->writeString(this->fkcolumn_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 7);
+  xfer += oprot->writeI32(this->key_seq);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("update_rule", ::apache::thrift::protocol::T_I32, 8);
+  xfer += oprot->writeI32(this->update_rule);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("delete_rule", ::apache::thrift::protocol::T_I32, 9);
+  xfer += oprot->writeI32(this->delete_rule);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("fk_name", ::apache::thrift::protocol::T_STRING, 10);
+  xfer += oprot->writeString(this->fk_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("pk_name", ::apache::thrift::protocol::T_STRING, 11);
+  xfer += oprot->writeString(this->pk_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 12);
+  xfer += oprot->writeBool(this->enable_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 13);
+  xfer += oprot->writeBool(this->validate_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 14);
+  xfer += oprot->writeBool(this->rely_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  if (this->__isset.catName) {
+    xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 15);
+    xfer += oprot->writeString(this->catName);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(SQLForeignKey &a, SQLForeignKey &b) {
+  using ::std::swap;
+  swap(a.pktable_db, b.pktable_db);
+  swap(a.pktable_name, b.pktable_name);
+  swap(a.pkcolumn_name, b.pkcolumn_name);
+  swap(a.fktable_db, b.fktable_db);
+  swap(a.fktable_name, b.fktable_name);
+  swap(a.fkcolumn_name, b.fkcolumn_name);
+  swap(a.key_seq, b.key_seq);
+  swap(a.update_rule, b.update_rule);
+  swap(a.delete_rule, b.delete_rule);
+  swap(a.fk_name, b.fk_name);
+  swap(a.pk_name, b.pk_name);
+  swap(a.enable_cstr, b.enable_cstr);
+  swap(a.validate_cstr, b.validate_cstr);
+  swap(a.rely_cstr, b.rely_cstr);
+  swap(a.catName, b.catName);
+  swap(a.__isset, b.__isset);
+}
+
+SQLForeignKey::SQLForeignKey(const SQLForeignKey& other6) {
+  pktable_db = other6.pktable_db;
+  pktable_name = other6.pktable_name;
+  pkcolumn_name = other6.pkcolumn_name;
+  fktable_db = other6.fktable_db;
+  fktable_name = other6.fktable_name;
+  fkcolumn_name = other6.fkcolumn_name;
+  key_seq = other6.key_seq;
+  update_rule = other6.update_rule;
+  delete_rule = other6.delete_rule;
+  fk_name = other6.fk_name;
+  pk_name = other6.pk_name;
+  enable_cstr = other6.enable_cstr;
+  validate_cstr = other6.validate_cstr;
+  rely_cstr = other6.rely_cstr;
+  catName = other6.catName;
+  __isset = other6.__isset;
+}
+SQLForeignKey& SQLForeignKey::operator=(const SQLForeignKey& other7) {
+  pktable_db = other7.pktable_db;
+  pktable_name = other7.pktable_name;
+  pkcolumn_name = other7.pkcolumn_name;
+  fktable_db = other7.fktable_db;
+  fktable_name = other7.fktable_name;
+  fkcolumn_name = other7.fkcolumn_name;
+  key_seq = other7.key_seq;
+  update_rule = other7.update_rule;
+  delete_rule = other7.delete_rule;
+  fk_name = other7.fk_name;
+  pk_name = other7.pk_name;
+  enable_cstr = other7.enable_cstr;
+  validate_cstr = other7.validate_cstr;
+  rely_cstr = other7.rely_cstr;
+  catName = other7.catName;
+  __isset = other7.__isset;
+  return *this;
+}
+void SQLForeignKey::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "SQLForeignKey(";
+  out << "pktable_db=" << to_string(pktable_db);
+  out << ", " << "pktable_name=" << to_string(pktable_name);
+  out << ", " << "pkcolumn_name=" << to_string(pkcolumn_name);
+  out << ", " << "fktable_db=" << to_string(fktable_db);
+  out << ", " << "fktable_name=" << to_string(fktable_name);
+  out << ", " << "fkcolumn_name=" << to_string(fkcolumn_name);
+  out << ", " << "key_seq=" << to_string(key_seq);
+  out << ", " << "update_rule=" << to_string(update_rule);
+  out << ", " << "delete_rule=" << to_string(delete_rule);
+  out << ", " << "fk_name=" << to_string(fk_name);
+  out << ", " << "pk_name=" << to_string(pk_name);
+  out << ", " << "enable_cstr=" << to_string(enable_cstr);
+  out << ", " << "validate_cstr=" << to_string(validate_cstr);
+  out << ", " << "rely_cstr=" << to_string(rely_cstr);
+  out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>"));
+  out << ")";
+}
+
+
+SQLUniqueConstraint::~SQLUniqueConstraint() throw() {
+}
+
+
+void SQLUniqueConstraint::__set_catName(const std::string& val) {
+  this->catName = val;
+}
+
+void SQLUniqueConstraint::__set_table_db(const std::string& val) {
+  this->table_db = val;
+}
+
+void SQLUniqueConstraint::__set_table_name(const std::string& val) {
+  this->table_name = val;
+}
+
+void SQLUniqueConstraint::__set_column_name(const std::string& val) {
+  this->column_name = val;
+}
+
+void SQLUniqueConstraint::__set_key_seq(const int32_t val) {
+  this->key_seq = val;
+}
+
+void SQLUniqueConstraint::__set_uk_name(const std::string& val) {
+  this->uk_name = val;
+}
+
+void SQLUniqueConstraint::__set_enable_cstr(const bool val) {
+  this->enable_cstr = val;
+}
+
+void SQLUniqueConstraint::__set_validate_cstr(const bool val) {
+  this->validate_cstr = val;
+}
+
+void SQLUniqueConstraint::__set_rely_cstr(const bool val) {
+  this->rely_cstr = val;
+}
+
+uint32_t SQLUniqueConstraint::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catName);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_db);
+          this->__isset.table_db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_name);
+          this->__isset.table_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->column_name);
+          this->__isset.column_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->key_seq);
+          this->__isset.key_seq = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->uk_name);
+          this->__isset.uk_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 7:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->enable_cstr);
+          this->__isset.enable_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 8:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->validate_cstr);
+          this->__isset.validate_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 9:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->rely_cstr);
+          this->__isset.rely_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t SQLUniqueConstraint::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("SQLUniqueConstraint");
+
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->catName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->table_db);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->table_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4);
+  xfer += oprot->writeString(this->column_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 5);
+  xfer += oprot->writeI32(this->key_seq);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("uk_name", ::apache::thrift::protocol::T_STRING, 6);
+  xfer += oprot->writeString(this->uk_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 7);
+  xfer += oprot->writeBool(this->enable_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 8);
+  xfer += oprot->writeBool(this->validate_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 9);
+  xfer += oprot->writeBool(this->rely_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(SQLUniqueConstraint &a, SQLUniqueConstraint &b) {
+  using ::std::swap;
+  swap(a.catName, b.catName);
+  swap(a.table_db, b.table_db);
+  swap(a.table_name, b.table_name);
+  swap(a.column_name, b.column_name);
+  swap(a.key_seq, b.key_seq);
+  swap(a.uk_name, b.uk_name);
+  swap(a.enable_cstr, b.enable_cstr);
+  swap(a.validate_cstr, b.validate_cstr);
+  swap(a.rely_cstr, b.rely_cstr);
+  swap(a.__isset, b.__isset);
+}
+
+SQLUniqueConstraint::SQLUniqueConstraint(const SQLUniqueConstraint& other8) {
+  catName = other8.catName;
+  table_db = other8.table_db;
+  table_name = other8.table_name;
+  column_name = other8.column_name;
+  key_seq = other8.key_seq;
+  uk_name = other8.uk_name;
+  enable_cstr = other8.enable_cstr;
+  validate_cstr = other8.validate_cstr;
+  rely_cstr = other8.rely_cstr;
+  __isset = other8.__isset;
+}
+SQLUniqueConstraint& SQLUniqueConstraint::operator=(const SQLUniqueConstraint& other9) {
+  catName = other9.catName;
+  table_db = other9.table_db;
+  table_name = other9.table_name;
+  column_name = other9.column_name;
+  key_seq = other9.key_seq;
+  uk_name = other9.uk_name;
+  enable_cstr = other9.enable_cstr;
+  validate_cstr = other9.validate_cstr;
+  rely_cstr = other9.rely_cstr;
+  __isset = other9.__isset;
+  return *this;
+}
+void SQLUniqueConstraint::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "SQLUniqueConstraint(";
+  out << "catName=" << to_string(catName);
+  out << ", " << "table_db=" << to_string(table_db);
+  out << ", " << "table_name=" << to_string(table_name);
+  out << ", " << "column_name=" << to_string(column_name);
+  out << ", " << "key_seq=" << to_string(key_seq);
+  out << ", " << "uk_name=" << to_string(uk_name);
+  out << ", " << "enable_cstr=" << to_string(enable_cstr);
+  out << ", " << "validate_cstr=" << to_string(validate_cstr);
+  out << ", " << "rely_cstr=" << to_string(rely_cstr);
+  out << ")";
+}
+
+
+SQLNotNullConstraint::~SQLNotNullConstraint() throw() {
+}
+
+
+void SQLNotNullConstraint::__set_catName(const std::string& val) {
+  this->catName = val;
+}
+
+void SQLNotNullConstraint::__set_table_db(const std::string& val) {
+  this->table_db = val;
+}
+
+void SQLNotNullConstraint::__set_table_name(const std::string& val) {
+  this->table_name = val;
+}
+
+void SQLNotNullConstraint::__set_column_name(const std::string& val) {
+  this->column_name = val;
+}
+
+void SQLNotNullConstraint::__set_nn_name(const std::string& val) {
+  this->nn_name = val;
+}
+
+void SQLNotNullConstraint::__set_enable_cstr(const bool val) {
+  this->enable_cstr = val;
+}
+
+void SQLNotNullConstraint::__set_validate_cstr(const bool val) {
+  this->validate_cstr = val;
+}
+
+void SQLNotNullConstraint::__set_rely_cstr(const bool val) {
+  this->rely_cstr = val;
+}
+
+uint32_t SQLNotNullConstraint::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catName);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_db);
+          this->__isset.table_db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_name);
+          this->__isset.table_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->column_name);
+          this->__isset.column_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->nn_name);
+          this->__isset.nn_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->enable_cstr);
+          this->__isset.enable_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 7:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->validate_cstr);
+          this->__isset.validate_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 8:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->rely_cstr);
+          this->__isset.rely_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t SQLNotNullConstraint::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("SQLNotNullConstraint");
+
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->catName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->table_db);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->table_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4);
+  xfer += oprot->writeString(this->column_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("nn_name", ::apache::thrift::protocol::T_STRING, 5);
+  xfer += oprot->writeString(this->nn_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 6);
+  xfer += oprot->writeBool(this->enable_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 7);
+  xfer += oprot->writeBool(this->validate_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 8);
+  xfer += oprot->writeBool(this->rely_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(SQLNotNullConstraint &a, SQLNotNullConstraint &b) {
+  using ::std::swap;
+  swap(a.catName, b.catName);
+  swap(a.table_db, b.table_db);
+  swap(a.table_name, b.table_name);
+  swap(a.column_name, b.column_name);
+  swap(a.nn_name, b.nn_name);
+  swap(a.enable_cstr, b.enable_cstr);
+  swap(a.validate_cstr, b.validate_cstr);
+  swap(a.rely_cstr, b.rely_cstr);
+  swap(a.__isset, b.__isset);
+}
+
+SQLNotNullConstraint::SQLNotNullConstraint(const SQLNotNullConstraint& other10) {
+  catName = other10.catName;
+  table_db = other10.table_db;
+  table_name = other10.table_name;
+  column_name = other10.column_name;
+  nn_name = other10.nn_name;
+  enable_cstr = other10.enable_cstr;
+  validate_cstr = other10.validate_cstr;
+  rely_cstr = other10.rely_cstr;
+  __isset = other10.__isset;
+}
+SQLNotNullConstraint& SQLNotNullConstraint::operator=(const SQLNotNullConstraint& other11) {
+  catName = other11.catName;
+  table_db = other11.table_db;
+  table_name = other11.table_name;
+  column_name = other11.column_name;
+  nn_name = other11.nn_name;
+  enable_cstr = other11.enable_cstr;
+  validate_cstr = other11.validate_cstr;
+  rely_cstr = other11.rely_cstr;
+  __isset = other11.__isset;
+  return *this;
+}
+void SQLNotNullConstraint::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "SQLNotNullConstraint(";
+  out << "catName=" << to_string(catName);
+  out << ", " << "table_db=" << to_string(table_db);
+  out << ", " << "table_name=" << to_string(table_name);
+  out << ", " << "column_name=" << to_string(column_name);
+  out << ", " << "nn_name=" << to_string(nn_name);
+  out << ", " << "enable_cstr=" << to_string(enable_cstr);
+  out << ", " << "validate_cstr=" << to_string(validate_cstr);
+  out << ", " << "rely_cstr=" << to_string(rely_cstr);
+  out << ")";
+}
+
+
+SQLDefaultConstraint::~SQLDefaultConstraint() throw() {
+}
+
+
+void SQLDefaultConstraint::__set_catName(const std::string& val) {
+  this->catName = val;
+}
+
+void SQLDefaultConstraint::__set_table_db(const std::string& val) {
+  this->table_db = val;
+}
+
+void SQLDefaultConstraint::__set_table_name(const std::string& val) {
+  this->table_name = val;
+}
+
+void SQLDefaultConstraint::__set_column_name(const std::string& val) {
+  this->column_name = val;
+}
+
+void SQLDefaultConstraint::__set_default_value(const std::string& val) {
+  this->default_value = val;
+}
+
+void SQLDefaultConstraint::__set_dc_name(const std::string& val) {
+  this->dc_name = val;
+}
+
+void SQLDefaultConstraint::__set_enable_cstr(const bool val) {
+  this->enable_cstr = val;
+}
+
+void SQLDefaultConstraint::__set_validate_cstr(const bool val) {
+  this->validate_cstr = val;
+}
+
+void SQLDefaultConstraint::__set_rely_cstr(const bool val) {
+  this->rely_cstr = val;
+}
+
+uint32_t SQLDefaultConstraint::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catName);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_db);
+          this->__isset.table_db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_name);
+          this->__isset.table_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->column_name);
+          this->__isset.column_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->default_value);
+          this->__isset.default_value = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->dc_name);
+          this->__isset.dc_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 7:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->enable_cstr);
+          this->__isset.enable_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 8:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->validate_cstr);
+          this->__isset.validate_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 9:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->rely_cstr);
+          this->__isset.rely_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t SQLDefaultConstraint::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("SQLDefaultConstraint");
+
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->catName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->table_db);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->table_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4);
+  xfer += oprot->writeString(this->column_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("default_value", ::apache::thrift::protocol::T_STRING, 5);
+  xfer += oprot->writeString(this->default_value);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dc_name", ::apache::thrift::protocol::T_STRING, 6);
+  xfer += oprot->writeString(this->dc_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 7);
+  xfer += oprot->writeBool(this->enable_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 8);
+  xfer += oprot->writeBool(this->validate_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 9);
+  xfer += oprot->writeBool(this->rely_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(SQLDefaultConstraint &a, SQLDefaultConstraint &b) {
+  using ::std::swap;
+  swap(a.catName, b.catName);
+  swap(a.table_db, b.table_db);
+  swap(a.table_name, b.table_name);
+  swap(a.column_name, b.column_name);
+  swap(a.default_value, b.default_value);
+  swap(a.dc_name, b.dc_name);
+  swap(a.enable_cstr, b.enable_cstr);
+  swap(a.validate_cstr, b.validate_cstr);
+  swap(a.rely_cstr, b.rely_cstr);
+  swap(a.__isset, b.__isset);
+}
+
+SQLDefaultConstraint::SQLDefaultConstraint(const SQLDefaultConstraint& other12) {
+  catName = other12.catName;
+  table_db = other12.table_db;
+  table_name = other12.table_name;
+  column_name = other12.column_name;
+  default_value = other12.default_value;
+  dc_name = other12.dc_name;
+  enable_cstr = other12.enable_cstr;
+  validate_cstr = other12.validate_cstr;
+  rely_cstr = other12.rely_cstr;
+  __isset = other12.__isset;
+}
+SQLDefaultConstraint& SQLDefaultConstraint::operator=(const SQLDefaultConstraint& other13) {
+  catName = other13.catName;
+  table_db = other13.table_db;
+  table_name = other13.table_name;
+  column_name = other13.column_name;
+  default_value = other13.default_value;
+  dc_name = other13.dc_name;
+  enable_cstr = other13.enable_cstr;
+  validate_cstr = other13.validate_cstr;
+  rely_cstr = other13.rely_cstr;
+  __isset = other13.__isset;
+  return *this;
+}
+void SQLDefaultConstraint::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "SQLDefaultConstraint(";
+  out << "catName=" << to_string(catName);
+  out << ", " << "table_db=" << to_string(table_db);
+  out << ", " << "table_name=" << to_string(table_name);
+  out << ", " << "column_name=" << to_string(column_name);
+  out << ", " << "default_value=" << to_string(default_value);
+  out << ", " << "dc_name=" << to_string(dc_name);
+  out << ", " << "enable_cstr=" << to_string(enable_cstr);
+  out << ", " << "validate_cstr=" << to_string(validate_cstr);
+  out << ", " << "rely_cstr=" << to_string(rely_cstr);
+  out << ")";
+}
+
+
+SQLCheckConstraint::~SQLCheckConstraint() throw() {
+}
+
+
+void SQLCheckConstraint::__set_catName(const std::string& val) {
+  this->catName = val;
+}
+
+void SQLCheckConstraint::__set_table_db(const std::string& val) {
+  this->table_db = val;
+}
+
+void SQLCheckConstraint::__set_table_name(const std::string& val) {
+  this->table_name = val;
+}
+
+void SQLCheckConstraint::__set_column_name(const std::string& val) {
+  this->column_name = val;
+}
+
+void SQLCheckConstraint::__set_check_expression(const std::string& val) {
+  this->check_expression = val;
+}
+
+void SQLCheckConstraint::__set_dc_name(const std::string& val) {
+  this->dc_name = val;
+}
+
+void SQLCheckConstraint::__set_enable_cstr(const bool val) {
+  this->enable_cstr = val;
+}
+
+void SQLCheckConstraint::__set_validate_cstr(const bool val) {
+  this->validate_cstr = val;
+}
+
+void SQLCheckConstraint::__set_rely_cstr(const bool val) {
+  this->rely_cstr = val;
+}
+
+uint32_t SQLCheckConstraint::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catName);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_db);
+          this->__isset.table_db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->table_name);
+          this->__isset.table_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->column_name);
+          this->__isset.column_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->check_expression);
+          this->__isset.check_expression = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->dc_name);
+          this->__isset.dc_name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 7:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->enable_cstr);
+          this->__isset.enable_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 8:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->validate_cstr);
+          this->__isset.validate_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 9:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->rely_cstr);
+          this->__isset.rely_cstr = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t SQLCheckConstraint::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("SQLCheckConstraint");
+
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->catName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("table_db", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->table_db);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->table_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 4);
+  xfer += oprot->writeString(this->column_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("check_expression", ::apache::thrift::protocol::T_STRING, 5);
+  xfer += oprot->writeString(this->check_expression);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dc_name", ::apache::thrift::protocol::T_STRING, 6);
+  xfer += oprot->writeString(this->dc_name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("enable_cstr", ::apache::thrift::protocol::T_BOOL, 7);
+  xfer += oprot->writeBool(this->enable_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("validate_cstr", ::apache::thrift::protocol::T_BOOL, 8);
+  xfer += oprot->writeBool(this->validate_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("rely_cstr", ::apache::thrift::protocol::T_BOOL, 9);
+  xfer += oprot->writeBool(this->rely_cstr);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(SQLCheckConstraint &a, SQLCheckConstraint &b) {
+  using ::std::swap;
+  swap(a.catName, b.catName);
+  swap(a.table_db, b.table_db);
+  swap(a.table_name, b.table_name);
+  swap(a.column_name, b.column_name);
+  swap(a.check_expression, b.check_expression);
+  swap(a.dc_name, b.dc_name);
+  swap(a.enable_cstr, b.enable_cstr);
+  swap(a.validate_cstr, b.validate_cstr);
+  swap(a.rely_cstr, b.rely_cstr);
+  swap(a.__isset, b.__isset);
+}
+
+SQLCheckConstraint::SQLCheckConstraint(const SQLCheckConstraint& other14) {
+  catName = other14.catName;
+  table_db = other14.table_db;
+  table_name = other14.table_name;
+  column_name = other14.column_name;
+  check_expression = other14.check_expression;
+  dc_name = other14.dc_name;
+  enable_cstr = other14.enable_cstr;
+  validate_cstr = other14.validate_cstr;
+  rely_cstr = other14.rely_cstr;
+  __isset = other14.__isset;
+}
+SQLCheckConstraint& SQLCheckConstraint::operator=(const SQLCheckConstraint& other15) {
+  catName = other15.catName;
+  table_db = other15.table_db;
+  table_name = other15.table_name;
+  column_name = other15.column_name;
+  check_expression = other15.check_expression;
+  dc_name = other15.dc_name;
+  enable_cstr = other15.enable_cstr;
+  validate_cstr = other15.validate_cstr;
+  rely_cstr = other15.rely_cstr;
+  __isset = other15.__isset;
+  return *this;
+}
+void SQLCheckConstraint::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "SQLCheckConstraint(";
+  out << "catName=" << to_string(catName);
+  out << ", " << "table_db=" << to_string(table_db);
+  out << ", " << "table_name=" << to_string(table_name);
+  out << ", " << "column_name=" << to_string(column_name);
+  out << ", " << "check_expression=" << to_string(check_expression);
+  out << ", " << "dc_name=" << to_string(dc_name);
+  out << ", " << "enable_cstr=" << to_string(enable_cstr);
+  out << ", " << "validate_cstr=" << to_string(validate_cstr);
+  out << ", " << "rely_cstr=" << to_string(rely_cstr);
+  out << ")";
+}
+
+
+Type::~Type() throw() {
+}
+
+
+void Type::__set_name(const std::string& val) {
+  this->name = val;
+}
+
+void Type::__set_type1(const std::string& val) {
+  this->type1 = val;
+  __isset.type1 = true;
+}
+
+void Type::__set_type2(const std::string& val) {
+  this->type2 = val;
+  __isset.type2 = true;
+}
+
+void Type::__set_fields(const std::vector<FieldSchema> & val) {
+  this->fields = val;
+  __isset.fields = true;
+}
+
+uint32_t Type::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->name);
+          this->__isset.name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->type1);
+          this->__isset.type1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->type2);
+          this->__isset.type2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->fields.clear();
+            uint32_t _size16;
+            ::apache::thrift::protocol::TType _etype19;
+            xfer += iprot->readListBegin(_etype19, _size16);
+            this->fields.resize(_size16);
+            uint32_t _i20;
+            for (_i20 = 0; _i20 < _size16; ++_i20)
+            {
+              xfer += this->fields[_i20].read(iprot);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.fields = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t Type::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("Type");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->name);
+  xfer += oprot->writeFieldEnd();
+
+  if (this->__isset.type1) {
+    xfer += oprot->writeFieldBegin("type1", ::apache::thrift::protocol::T_STRING, 2);
+    xfer += oprot->writeString(this->type1);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.type2) {
+    xfer += oprot->writeFieldBegin("type2", ::apache::thrift::protocol::T_STRING, 3);
+    xfer += oprot->writeString(this->type2);
+    xfer += oprot->writeFieldEnd();
+  }
+  if (this->__isset.fields) {
+    xfer += oprot->writeFieldBegin("fields", ::apache::thrift::protocol::T_LIST, 4);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->fields.size()));
+      std::vector<FieldSchema> ::const_iterator _iter21;
+      for (_iter21 = this->fields.begin(); _iter21 != this->fields.end(); ++_iter21)
+      {
+        xfer += (*_iter21).write(oprot);
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(Type &a, Type &b) {
+  using ::std::swap;
+  swap(a.name, b.name);
+  swap(a.type1, b.type1);
+  swap(a.type2, b.type2);
+  swap(a.fields, b.fields);
+  swap(a.__isset, b.__isset);
+}
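+// A usage sketch (hypothetical, not generator output): the free swap()
+// overload above lets the std::swap idiom pick up the member-wise version
+// via argument-dependent lookup, which is O(1) for the string/vector fields:
+//
+//   Type a, b;
+//   using std::swap;
+//   swap(a, b);  // resolves to swap(Type&, Type&) above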
+
+Type::Type(const Type& other22) {
+  name = other22.name;
+  type1 = other22.type1;
+  type2 = other22.type2;
+  fields = other22.fields;
+  __isset = other22.__isset;
+}
+Type& Type::operator=(const Type& other23) {
+  name = other23.name;
+  type1 = other23.type1;
+  type2 = other23.type2;
+  fields = other23.fields;
+  __isset = other23.__isset;
+  return *this;
+}
+void Type::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "Type(";
+  out << "name=" << to_string(name);
+  out << ", " << "type1="; (__isset.type1 ? (out << to_string(type1)) : (out << "<null>"));
+  out << ", " << "type2="; (__isset.type2 ? (out << to_string(type2)) : (out << "<null>"));
+  out << ", " << "fields="; (__isset.fields ? (out << to_string(fields)) : (out << "<null>"));
+  out << ")";
+}
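+// A minimal round-trip sketch (hypothetical, not part of the generated file):
+// drive the generated write()/read() pair through an in-memory transport.
+// TMemoryBuffer and TBinaryProtocol are the stock Thrift 0.9.3 classes; the
+// field values are illustrative only.
+//
+//   #include <thrift/transport/TBufferTransports.h>
+//   #include <thrift/protocol/TBinaryProtocol.h>
+//   #include <iostream>
+//
+//   boost::shared_ptr<apache::thrift::transport::TMemoryBuffer> buf(
+//       new apache::thrift::transport::TMemoryBuffer());
+//   apache::thrift::protocol::TBinaryProtocol proto(buf);
+//   Type t;
+//   t.__set_name("person");        // plain field
+//   t.__set_type1("struct");       // optional field: also flips __isset.type1
+//   t.write(&proto);               // encode into buf
+//   Type decoded;
+//   decoded.read(&proto);          // decode from the same buffer
+//   decoded.printTo(std::cout);    // unset optionals render as <null>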
+
+
+HiveObjectRef::~HiveObjectRef() throw() {
+}
+
+
+void HiveObjectRef::__set_objectType(const HiveObjectType::type val) {
+  this->objectType = val;
+}
+
+void HiveObjectRef::__set_dbName(const std::string& val) {
+  this->dbName = val;
+}
+
+void HiveObjectRef::__set_objectName(const std::string& val) {
+  this->objectName = val;
+}
+
+void HiveObjectRef::__set_partValues(const std::vector<std::string> & val) {
+  this->partValues = val;
+}
+
+void HiveObjectRef::__set_columnName(const std::string& val) {
+  this->columnName = val;
+}
+
+void HiveObjectRef::__set_catName(const std::string& val) {
+  this->catName = val;
+  __isset.catName = true;
+}
+
+uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          int32_t ecast24;
+          xfer += iprot->readI32(ecast24);
+          this->objectType = (HiveObjectType::type)ecast24;
+          this->__isset.objectType = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->dbName);
+          this->__isset.dbName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->objectName);
+          this->__isset.objectName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->partValues.clear();
+            uint32_t _size25;
+            ::apache::thrift::protocol::TType _etype28;
+            xfer += iprot->readListBegin(_etype28, _size25);
+            this->partValues.resize(_size25);
+            uint32_t _i29;
+            for (_i29 = 0; _i29 < _size25; ++_i29)
+            {
+              xfer += iprot->readString(this->partValues[_i29]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.partValues = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->columnName);
+          this->__isset.columnName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 6:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->catName);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t HiveObjectRef::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("HiveObjectRef");
+
+  xfer += oprot->writeFieldBegin("objectType", ::apache::thrift::protocol::T_I32, 1);
+  xfer += oprot->writeI32((int32_t)this->objectType);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("dbName", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->dbName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("objectName", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->objectName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("partValues", ::apache::thrift::protocol::T_LIST, 4);
+  {
+    xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partValues.size()));
+    std::vector<std::string> ::const_iterator _iter30;
+    for (_iter30 = this->partValues.begin(); _iter30 != this->partValues.end(); ++_iter30)
+    {
+      xfer += oprot->writeString((*_iter30));
+    }
+    xfer += oprot->writeListEnd();
+  }
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("columnName", ::apache::thrift::protocol::T_STRING, 5);
+  xfer += oprot->writeString(this->columnName);
+  xfer += oprot->writeFieldEnd();
+
+  if (this->__isset.catName) {
+    xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRING, 6);
+    xfer += oprot->writeString(this->catName);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(HiveObjectRef &a, HiveObjectRef &b) {
+  using ::std::swap;
+  swap(a.objectType, b.objectType);
+  swap(a.dbName, b.dbName);
+  swap(a.objectName, b.objectName);
+  swap(a.partValues, b.partValues);
+  swap(a.columnName, b.columnName);
+  swap(a.catName, b.catName);
+  swap(a.__isset, b.__isset);
+}
+
+HiveObjectRef::HiveObjectRef(const HiveObjectRef& other31) {
+  objectType = other31.objectType;
+  dbName = other31.dbName;
+  objectName = other31.objectName;
+  partValues = other31.partValues;
+  columnName = other31.columnName;
+  catName = other31.catName;
+  __isset = other31.__isset;
+}
+HiveObjectRef& HiveObjectRef::operator=(const HiveObjectRef& other32) {
+  objectType = other32.objectType;
+  dbName = other32.dbName;
+  objectName = other32.objectName;
+  partValues = other32.partValues;
+  columnName = other32.columnName;
+  catName = other32.catName;
+  __isset = other32.__isset;
+  return *this;
+}
+void HiveObjectRef::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "HiveObjectRef(";
+  out << "objectType=" << to_string(objectType);
+  out << ", " << "dbName=" << to_string(dbName);
+  out << ", " << "objectName=" << to_string(objectName);
+  out << ", " << "partValues=" << to_string(partValues);
+  out << ", " << "columnName=" << to_string(columnName);
+  out << ", " << "catName="; (__isset.catName ? (out << to_string(catName)) : (out << "<null>"));
+  out << ")";
+}
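+// A construction sketch (hypothetical values): the generated __set_* helpers
+// assign a field and, for optionals such as catName, flip the matching
+// __isset flag so that write() and printTo() emit the field:
+//
+//   HiveObjectRef ref;
+//   ref.__set_objectType(HiveObjectType::TABLE);
+//   ref.__set_dbName("default");
+//   ref.__set_objectName("sales");
+//   ref.__set_partValues(std::vector<std::string>());
+//   ref.__set_columnName("");
+//   ref.__set_catName("hive");  // optional; sets __isset.catName
+//   ref.printTo(std::cout);     // catName is printed because __isset is set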
+
+
+PrivilegeGrantInfo::~PrivilegeGrantInfo() throw() {
+}
+
+
+void PrivilegeGrantInfo::__set_privilege(const std::string& val) {
+  this->privilege = val;
+}
+
+void PrivilegeGrantInfo::__set_createTime(const int32_t val) {
+  this->createTime = val;
+}
+
+void PrivilegeGrantInfo::__set_grantor(const std::string& val) {
+  this->grantor = val;
+}
+
+void PrivilegeGrantInfo::__set_grantorType(const PrincipalType::type val) {
+  this->grantorType = val;
+}
+
+void PrivilegeGrantInfo::__set_grantOption(const bool val) {
+  this->grantOption = val;
+}
+
+uint32_t PrivilegeGrantInfo::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->privilege);
+          this->__isset.privilege = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          xfer += iprot->readI32(this->createTime);
+          this->__isset.createTime = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->grantor);
+          this->__isset.grantor = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          int32_t ecast33;
+          xfer += iprot->readI32(ecast33);
+          this->grantorType = (PrincipalType::type)ecast33;
+          this->__isset.grantorType = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->grantOption);
+          this->__isset.grantOption = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t PrivilegeGrantInfo::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("PrivilegeGrantInfo");
+
+  xfer += oprot->writeFieldBegin("privilege", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->privilege);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("createTime", ::apache::thrift::protocol::T_I32, 2);
+  xfer += oprot->writeI32(this->createTime);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("grantor", ::apache::thrift::protocol::T_STRING, 3);
+  xfer += oprot->writeString(this->grantor);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("grantorType", ::apache::thrift::protocol::T_I32, 4);
+  xfer += oprot->writeI32((int32_t)this->grantorType);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("grantOption", ::apache::thrift::protocol::T_BOOL, 5);
+  xfer += oprot->writeBool(this->grantOption);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(PrivilegeGrantInfo &a, PrivilegeGrantInfo &b) {
+  using ::std::swap;
+  swap(a.privilege, b.privilege);
+  swap(a.createTime, b.createTime);
+  swap(a.grantor, b.grantor);
+  swap(a.grantorType, b.grantorType);
+  swap(a.grantOption, b.grantOption);
+  swap(a.__isset, b.__isset);
+}
+
+PrivilegeGrantInfo::PrivilegeGrantInfo(const PrivilegeGrantInfo& other34) {
+  privilege = other34.privilege;
+  createTime = other34.createTime;
+  grantor = other34.grantor;
+  grantorType = other34.grantorType;
+  grantOption = other34.grantOption;
+  __isset = other34.__isset;
+}
+PrivilegeGrantInfo& PrivilegeGrantInfo::operator=(const PrivilegeGrantInfo& other35) {
+  privilege = other35.privilege;
+  createTime = other35.createTime;
+  grantor = other35.grantor;
+  grantorType = other35.grantorType;
+  grantOption = other35.grantOption;
+  __isset = other35.__isset;
+  return *this;
+}
+void PrivilegeGrantInfo::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "PrivilegeGrantInfo(";
+  out << "privilege=" << to_string(privilege);
+  out << ", " << "createTime=" << to_string(createTime);
+  out << ", " << "grantor=" << to_string(grantor);
+  out << ", " << "grantorType=" << to_string(grantorType);
+  out << ", " << "grantOption=" << to_string(grantOption);
+  out << ")";
+}
+
+
+HiveObjectPrivilege::~HiveObjectPrivilege() throw() {
+}
+
+
+void HiveObjectPrivilege::__set_hiveObject(const HiveObjectRef& val) {
+  this->hiveObject = val;
+}
+
+void HiveObjectPrivilege::__set_principalName(const std::string& val) {
+  this->principalName = val;
+}
+
+void HiveObjectPrivilege::__set_principalType(const PrincipalType::type val) {
+  this->principalType = val;
+}
+
+void HiveObjectPrivilege::__set_grantInfo(const PrivilegeGrantInfo& val) {
+  this->grantInfo = val;
+}
+
+void HiveObjectPrivilege::__set_authorizer(const std::string& val) {
+  this->authorizer = val;
+}
+
+uint32_t HiveObjectPrivilege::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->hiveObject.read(iprot);
+          this->__isset.hiveObject = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->principalName);
+          this->__isset.principalName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_I32) {
+          int32_t ecast36;
+          xfer += iprot->readI32(ecast36);
+          this->principalType = (PrincipalType::type)ecast36;
+          this->__isset.principalType = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 4:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->grantInfo.read(iprot);
+          this->__isset.grantInfo = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 5:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->authorizer);
+          this->__isset.authorizer = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t HiveObjectPrivilege::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("HiveObjectPrivilege");
+
+  xfer += oprot->writeFieldBegin("hiveObject", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->hiveObject.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("principalName", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->principalName);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("principalType", ::apache::thrift::protocol::T_I32, 3);
+  xfer += oprot->writeI32((int32_t)this->principalType);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("grantInfo", ::apache::thrift::protocol::T_STRUCT, 4);
+  xfer += this->grantInfo.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("authorizer", ::apache::thrift::protocol::T_STRING, 5);
+  xfer += oprot->writeString(this->authorizer);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+void swap(HiveObjectPrivilege &a, HiveObjectPrivilege &b) {
+  using ::std::swap;
+  swap(a.hiveObject, b.hiveObject);
+  swap(a.principalName, b.principalName);
+  swap(a.principalType, b.principalType);
+  swap(a.grantInfo, b.grantInfo);
+  swap(a.authorizer, b.authorizer);
+  swap(a.__isset, b.__isset);
+}
+
+HiveObjectPrivilege::HiveObjectPrivilege(const HiveObjectPrivilege& other37) {
+  hiveObject = other37.hiveObject;
+  principalName = other37.principalName;
+  principalType = other37.principalType;
+  grantInfo = other37.grantInfo;
+  authorizer = other37.authorizer;
+  __isset = other37.__isset;
+}
+HiveObjectPrivilege& HiveObjectPrivilege::operator=(const HiveObjectPrivilege& other38) {
+  hiveObject = other38.hiveObject;
+  principalName = other38.principalName;
+  principalType = other38.principalType;
+  grantInfo = other38.grantInfo;
+  authorizer = other38.authorizer;
+  __isset = other38.__isset;
+  return *this;
+}
+void HiveObjectPrivilege::printTo(std::ostream& out) const {
+  using ::apache::thrift::to_string;
+  out << "HiveObjectPrivilege(";
+  out << "hiveObject=" << to_string(hiveObject);
+  out << ", " << "principalName=" << to_string(principalName);
+  out << ", " << "principalType=" << to_string(principalType);
+  out << ", " << "grantInfo=" << to_string(grantInfo);
+  out << ", " << "authorizer=" << to_string(authorizer);
+  out << ")";
+}
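+// A composition sketch (hypothetical values): privilege records nest the two
+// structs above, and nested write() calls are emitted automatically as
+// T_STRUCT fields:
+//
+//   PrivilegeGrantInfo gi;
+//   gi.__set_privilege("SELECT");
+//   gi.__set_grantor("admin");
+//   gi.__set_grantorType(PrincipalType::USER);
+//   gi.__set_grantOption(false);
+//   HiveObjectPrivilege p;
+//   p.__set_hiveObject(ref);   // e.g. the HiveObjectRef built earlier
+//   p.__set_principalName("analyst");
+//   p.__set_principalType(PrincipalType::USER);
+//   p.__set_grantInfo(gi);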
+
+
+PrivilegeBag::~PrivilegeBag() throw() {
+}
+
+
+void PrivilegeBag::__set_privileges(const std::vector<HiveObjectPrivilege> & val) {
+  this->privileges = val;
+}
+
+uint32_t PrivilegeBag::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fnam

<TRUNCATED>

[79/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
index 0000000,add9197..b115f4c
mode 000000,100755..100755
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote
@@@ -1,0 -1,1634 +1,1641 @@@
+ #!/usr/bin/env python
+ #
+ # Autogenerated by Thrift Compiler (0.9.3)
+ #
+ # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ #
+ #  options string: py
+ #
+ 
+ import sys
+ import pprint
+ from urlparse import urlparse
+ from thrift.transport import TTransport
+ from thrift.transport import TSocket
+ from thrift.transport import TSSLSocket
+ from thrift.transport import THttpClient
+ from thrift.protocol import TBinaryProtocol
+ 
+ from hive_metastore import ThriftHiveMetastore
+ from hive_metastore.ttypes import *
+ 
+ if len(sys.argv) <= 1 or sys.argv[1] == '--help':
+   print('')
+   print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] function [arg1 [arg2...]]')
+   print('')
+   print('Functions:')
+   print('  string getMetaConf(string key)')
+   print('  void setMetaConf(string key, string value)')
+   print('  void create_catalog(CreateCatalogRequest catalog)')
+   print('  void alter_catalog(AlterCatalogRequest rqst)')
+   print('  GetCatalogResponse get_catalog(GetCatalogRequest catName)')
+   print('  GetCatalogsResponse get_catalogs()')
+   print('  void drop_catalog(DropCatalogRequest catName)')
+   print('  void create_database(Database database)')
+   print('  Database get_database(string name)')
+   print('  void drop_database(string name, bool deleteData, bool cascade)')
+   print('   get_databases(string pattern)')
+   print('   get_all_databases()')
+   print('  void alter_database(string dbname, Database db)')
+   print('  Type get_type(string name)')
+   print('  bool create_type(Type type)')
+   print('  bool drop_type(string type)')
+   print('   get_type_all(string name)')
+   print('   get_fields(string db_name, string table_name)')
+   print('   get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)')
+   print('   get_schema(string db_name, string table_name)')
+   print('   get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)')
+   print('  void create_table(Table tbl)')
+   print('  void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)')
+   print('  void create_table_with_constraints(Table tbl,  primaryKeys,  foreignKeys,  uniqueConstraints,  notNullConstraints,  defaultConstraints,  checkConstraints)')
+   print('  void drop_constraint(DropConstraintRequest req)')
+   print('  void add_primary_key(AddPrimaryKeyRequest req)')
+   print('  void add_foreign_key(AddForeignKeyRequest req)')
+   print('  void add_unique_constraint(AddUniqueConstraintRequest req)')
+   print('  void add_not_null_constraint(AddNotNullConstraintRequest req)')
+   print('  void add_default_constraint(AddDefaultConstraintRequest req)')
+   print('  void add_check_constraint(AddCheckConstraintRequest req)')
+   print('  void drop_table(string dbname, string name, bool deleteData)')
+   print('  void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)')
+   print('  void truncate_table(string dbName, string tableName,  partNames)')
+   print('   get_tables(string db_name, string pattern)')
+   print('   get_tables_by_type(string db_name, string pattern, string tableType)')
+   print('   get_materialized_views_for_rewriting(string db_name)')
+   print('   get_table_meta(string db_patterns, string tbl_patterns,  tbl_types)')
+   print('   get_all_tables(string db_name)')
+   print('  Table get_table(string dbname, string tbl_name)')
+   print('   get_table_objects_by_name(string dbname,  tbl_names)')
+   print('  GetTableResult get_table_req(GetTableRequest req)')
+   print('  GetTablesResult get_table_objects_by_name_req(GetTablesRequest req)')
+   print('   get_materialization_invalidation_info(string dbname,  tbl_names)')
+   print('  void update_creation_metadata(string catName, string dbname, string tbl_name, CreationMetadata creation_metadata)')
+   print('   get_table_names_by_filter(string dbname, string filter, i16 max_tables)')
+   print('  void alter_table(string dbname, string tbl_name, Table new_tbl)')
+   print('  void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)')
+   print('  void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade)')
+   print('  Partition add_partition(Partition new_part)')
+   print('  Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context)')
+   print('  i32 add_partitions( new_parts)')
+   print('  i32 add_partitions_pspec( new_parts)')
+   print('  Partition append_partition(string db_name, string tbl_name,  part_vals)')
+   print('  AddPartitionsResult add_partitions_req(AddPartitionsRequest request)')
+   print('  Partition append_partition_with_environment_context(string db_name, string tbl_name,  part_vals, EnvironmentContext environment_context)')
+   print('  Partition append_partition_by_name(string db_name, string tbl_name, string part_name)')
+   print('  Partition append_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, EnvironmentContext environment_context)')
+   print('  bool drop_partition(string db_name, string tbl_name,  part_vals, bool deleteData)')
+   print('  bool drop_partition_with_environment_context(string db_name, string tbl_name,  part_vals, bool deleteData, EnvironmentContext environment_context)')
+   print('  bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)')
+   print('  bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)')
+   print('  DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)')
+   print('  Partition get_partition(string db_name, string tbl_name,  part_vals)')
+   print('  Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)')
+   print('   exchange_partitions( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)')
+   print('  Partition get_partition_with_auth(string db_name, string tbl_name,  part_vals, string user_name,  group_names)')
+   print('  Partition get_partition_by_name(string db_name, string tbl_name, string part_name)')
+   print('   get_partitions(string db_name, string tbl_name, i16 max_parts)')
+   print('   get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name,  group_names)')
+   print('   get_partitions_pspec(string db_name, string tbl_name, i32 max_parts)')
+   print('   get_partition_names(string db_name, string tbl_name, i16 max_parts)')
+   print('  PartitionValuesResponse get_partition_values(PartitionValuesRequest request)')
+   print('   get_partitions_ps(string db_name, string tbl_name,  part_vals, i16 max_parts)')
+   print('   get_partitions_ps_with_auth(string db_name, string tbl_name,  part_vals, i16 max_parts, string user_name,  group_names)')
+   print('   get_partition_names_ps(string db_name, string tbl_name,  part_vals, i16 max_parts)')
+   print('   get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)')
+   print('   get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts)')
+   print('  PartitionsByExprResult get_partitions_by_expr(PartitionsByExprRequest req)')
+   print('  i32 get_num_partitions_by_filter(string db_name, string tbl_name, string filter)')
+   print('   get_partitions_by_names(string db_name, string tbl_name,  names)')
+   print('  void alter_partition(string db_name, string tbl_name, Partition new_part)')
+   print('  void alter_partitions(string db_name, string tbl_name,  new_parts)')
+   print('  void alter_partitions_with_environment_context(string db_name, string tbl_name,  new_parts, EnvironmentContext environment_context)')
++  print('  AlterPartitionsResponse alter_partitions_with_environment_context_req(AlterPartitionsRequest req)')
+   print('  void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)')
+   print('  void rename_partition(string db_name, string tbl_name,  part_vals, Partition new_part)')
+   print('  bool partition_name_has_valid_characters( part_vals, bool throw_exception)')
+   print('  string get_config_value(string name, string defaultValue)')
+   print('   partition_name_to_vals(string part_name)')
+   print('   partition_name_to_spec(string part_name)')
+   print('  void markPartitionForEvent(string db_name, string tbl_name,  part_vals, PartitionEventType eventType)')
+   print('  bool isPartitionMarkedForEvent(string db_name, string tbl_name,  part_vals, PartitionEventType eventType)')
+   print('  PrimaryKeysResponse get_primary_keys(PrimaryKeysRequest request)')
+   print('  ForeignKeysResponse get_foreign_keys(ForeignKeysRequest request)')
+   print('  UniqueConstraintsResponse get_unique_constraints(UniqueConstraintsRequest request)')
+   print('  NotNullConstraintsResponse get_not_null_constraints(NotNullConstraintsRequest request)')
+   print('  DefaultConstraintsResponse get_default_constraints(DefaultConstraintsRequest request)')
+   print('  CheckConstraintsResponse get_check_constraints(CheckConstraintsRequest request)')
+   print('  bool update_table_column_statistics(ColumnStatistics stats_obj)')
+   print('  bool update_partition_column_statistics(ColumnStatistics stats_obj)')
+   print('  ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)')
+   print('  ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)')
+   print('  TableStatsResult get_table_statistics_req(TableStatsRequest request)')
+   print('  PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)')
+   print('  AggrStats get_aggr_stats_for(PartitionsStatsRequest request)')
+   print('  bool set_aggr_stats_for(SetPartitionsStatsRequest request)')
+   print('  bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)')
+   print('  bool delete_table_column_statistics(string db_name, string tbl_name, string col_name)')
+   print('  void create_function(Function func)')
+   print('  void drop_function(string dbName, string funcName)')
+   print('  void alter_function(string dbName, string funcName, Function newFunc)')
+   print('   get_functions(string dbName, string pattern)')
+   print('  Function get_function(string dbName, string funcName)')
+   print('  GetAllFunctionsResponse get_all_functions()')
+   print('  bool create_role(Role role)')
+   print('  bool drop_role(string role_name)')
+   print('   get_role_names()')
+   print('  bool grant_role(string role_name, string principal_name, PrincipalType principal_type, string grantor, PrincipalType grantorType, bool grant_option)')
+   print('  bool revoke_role(string role_name, string principal_name, PrincipalType principal_type)')
+   print('   list_roles(string principal_name, PrincipalType principal_type)')
+   print('  GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request)')
+   print('  GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request)')
+   print('  GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request)')
+   print('  PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, string user_name,  group_names)')
+   print('   list_privileges(string principal_name, PrincipalType principal_type, HiveObjectRef hiveObject)')
+   print('  bool grant_privileges(PrivilegeBag privileges)')
+   print('  bool revoke_privileges(PrivilegeBag privileges)')
+   print('  GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilegeRequest request)')
+   print('  GrantRevokePrivilegeResponse refresh_privileges(HiveObjectRef objToRefresh, string authorizer, GrantRevokePrivilegeRequest grantRequest)')
+   print('   set_ugi(string user_name,  group_names)')
+   print('  string get_delegation_token(string token_owner, string renewer_kerberos_principal_name)')
+   print('  i64 renew_delegation_token(string token_str_form)')
+   print('  void cancel_delegation_token(string token_str_form)')
+   print('  bool add_token(string token_identifier, string delegation_token)')
+   print('  bool remove_token(string token_identifier)')
+   print('  string get_token(string token_identifier)')
+   print('   get_all_token_identifiers()')
+   print('  i32 add_master_key(string key)')
+   print('  void update_master_key(i32 seq_number, string key)')
+   print('  bool remove_master_key(i32 key_seq)')
+   print('   get_master_keys()')
+   print('  GetOpenTxnsResponse get_open_txns()')
+   print('  GetOpenTxnsInfoResponse get_open_txns_info()')
+   print('  OpenTxnsResponse open_txns(OpenTxnRequest rqst)')
+   print('  void abort_txn(AbortTxnRequest rqst)')
+   print('  void abort_txns(AbortTxnsRequest rqst)')
+   print('  void commit_txn(CommitTxnRequest rqst)')
+   print('  void repl_tbl_writeid_state(ReplTblWriteIdStateRequest rqst)')
+   print('  GetValidWriteIdsResponse get_valid_write_ids(GetValidWriteIdsRequest rqst)')
+   print('  AllocateTableWriteIdsResponse allocate_table_write_ids(AllocateTableWriteIdsRequest rqst)')
+   print('  LockResponse lock(LockRequest rqst)')
+   print('  LockResponse check_lock(CheckLockRequest rqst)')
+   print('  void unlock(UnlockRequest rqst)')
+   print('  ShowLocksResponse show_locks(ShowLocksRequest rqst)')
+   print('  void heartbeat(HeartbeatRequest ids)')
+   print('  HeartbeatTxnRangeResponse heartbeat_txn_range(HeartbeatTxnRangeRequest txns)')
+   print('  void compact(CompactionRequest rqst)')
+   print('  CompactionResponse compact2(CompactionRequest rqst)')
+   print('  ShowCompactResponse show_compact(ShowCompactRequest rqst)')
+   print('  void add_dynamic_partitions(AddDynamicPartitions rqst)')
+   print('  NotificationEventResponse get_next_notification(NotificationEventRequest rqst)')
+   print('  CurrentNotificationEventId get_current_notificationEventId()')
+   print('  NotificationEventsCountResponse get_notification_events_count(NotificationEventsCountRequest rqst)')
+   print('  FireEventResponse fire_listener_event(FireEventRequest rqst)')
+   print('  void flushCache()')
+   print('  WriteNotificationLogResponse add_write_notification_log(WriteNotificationLogRequest rqst)')
+   print('  CmRecycleResponse cm_recycle(CmRecycleRequest request)')
+   print('  GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req)')
+   print('  GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req)')
+   print('  PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req)')
+   print('  ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req)')
+   print('  CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req)')
+   print('  string get_metastore_db_uuid()')
+   print('  WMCreateResourcePlanResponse create_resource_plan(WMCreateResourcePlanRequest request)')
+   print('  WMGetResourcePlanResponse get_resource_plan(WMGetResourcePlanRequest request)')
+   print('  WMGetActiveResourcePlanResponse get_active_resource_plan(WMGetActiveResourcePlanRequest request)')
+   print('  WMGetAllResourcePlanResponse get_all_resource_plans(WMGetAllResourcePlanRequest request)')
+   print('  WMAlterResourcePlanResponse alter_resource_plan(WMAlterResourcePlanRequest request)')
+   print('  WMValidateResourcePlanResponse validate_resource_plan(WMValidateResourcePlanRequest request)')
+   print('  WMDropResourcePlanResponse drop_resource_plan(WMDropResourcePlanRequest request)')
+   print('  WMCreateTriggerResponse create_wm_trigger(WMCreateTriggerRequest request)')
+   print('  WMAlterTriggerResponse alter_wm_trigger(WMAlterTriggerRequest request)')
+   print('  WMDropTriggerResponse drop_wm_trigger(WMDropTriggerRequest request)')
+   print('  WMGetTriggersForResourePlanResponse get_triggers_for_resourceplan(WMGetTriggersForResourePlanRequest request)')
+   print('  WMCreatePoolResponse create_wm_pool(WMCreatePoolRequest request)')
+   print('  WMAlterPoolResponse alter_wm_pool(WMAlterPoolRequest request)')
+   print('  WMDropPoolResponse drop_wm_pool(WMDropPoolRequest request)')
+   print('  WMCreateOrUpdateMappingResponse create_or_update_wm_mapping(WMCreateOrUpdateMappingRequest request)')
+   print('  WMDropMappingResponse drop_wm_mapping(WMDropMappingRequest request)')
+   print('  WMCreateOrDropTriggerToPoolMappingResponse create_or_drop_wm_trigger_to_pool_mapping(WMCreateOrDropTriggerToPoolMappingRequest request)')
+   print('  void create_ischema(ISchema schema)')
+   print('  void alter_ischema(AlterISchemaRequest rqst)')
+   print('  ISchema get_ischema(ISchemaName name)')
+   print('  void drop_ischema(ISchemaName name)')
+   print('  void add_schema_version(SchemaVersion schemaVersion)')
+   print('  SchemaVersion get_schema_version(SchemaVersionDescriptor schemaVersion)')
+   print('  SchemaVersion get_schema_latest_version(ISchemaName schemaName)')
+   print('   get_schema_all_versions(ISchemaName schemaName)')
+   print('  void drop_schema_version(SchemaVersionDescriptor schemaVersion)')
+   print('  FindSchemasByColsResp get_schemas_by_cols(FindSchemasByColsRqst rqst)')
+   print('  void map_schema_version_to_serde(MapSchemaVersionToSerdeRequest rqst)')
+   print('  void set_schema_version_state(SetSchemaVersionStateRequest rqst)')
+   print('  void add_serde(SerDeInfo serde)')
+   print('  SerDeInfo get_serde(GetSerdeRequest rqst)')
+   print('  LockResponse get_lock_materialization_rebuild(string dbName, string tableName, i64 txnId)')
+   print('  bool heartbeat_lock_materialization_rebuild(string dbName, string tableName, i64 txnId)')
+   print('  void add_runtime_stats(RuntimeStat stat)')
+   print('   get_runtime_stats(GetRuntimeStatsRequest rqst)')
+   print('  string getName()')
+   print('  string getVersion()')
+   print('  fb_status getStatus()')
+   print('  string getStatusDetails()')
+   print('   getCounters()')
+   print('  i64 getCounter(string key)')
+   print('  void setOption(string key, string value)')
+   print('  string getOption(string key)')
+   print('   getOptions()')
+   print('  string getCpuProfile(i32 profileDurationInSec)')
+   print('  i64 aliveSince()')
+   print('  void reinitialize()')
+   print('  void shutdown()')
+   print('')
+   sys.exit(0)
+ 
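+ # A hypothetical invocation sketch (host and names are made up; the script's
+ # built-in default is localhost:9090): one stub call per run, e.g. against a
+ # metastore listening on its usual Thrift port:
+ #
+ #   ThriftHiveMetastore-remote -h metastore.example.com:9083 get_all_databases
+ #   ThriftHiveMetastore-remote -h metastore.example.com:9083 -f get_database default
+ 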
+ pp = pprint.PrettyPrinter(indent = 2)
+ host = 'localhost'
+ port = 9090
+ uri = ''
+ framed = False
+ ssl = False
+ http = False
+ argi = 1
+ 
+ if sys.argv[argi] == '-h':
+   parts = sys.argv[argi+1].split(':')
+   host = parts[0]
+   if len(parts) > 1:
+     port = int(parts[1])
+   argi += 2
+ 
+ if sys.argv[argi] == '-u':
+   url = urlparse(sys.argv[argi+1])
+   parts = url[1].split(':')
+   host = parts[0]
+   if len(parts) > 1:
+     port = int(parts[1])
+   else:
+     port = 80
+   uri = url[2]
+   if url[4]:
+     uri += '?%s' % url[4]
+   http = True
+   argi += 2
+ 
+ if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed':
+   framed = True
+   argi += 1
+ 
+ if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl':
+   ssl = True
+   argi += 1
+ 
+ cmd = sys.argv[argi]
+ args = sys.argv[argi+1:]
+ 
+ if http:
+   transport = THttpClient.THttpClient(host, port, uri)
+ else:
+   socket = TSSLSocket.TSSLSocket(host, port, validate=False) if ssl else TSocket.TSocket(host, port)
+   if framed:
+     transport = TTransport.TFramedTransport(socket)
+   else:
+     transport = TTransport.TBufferedTransport(socket)
+ protocol = TBinaryProtocol.TBinaryProtocol(transport)
+ client = ThriftHiveMetastore.Client(protocol)
+ transport.open()
+ 
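+ # Each branch below maps one CLI verb onto the generated client stub. String
+ # parameters are passed through as-is, while struct, list, and bool
+ # parameters go through eval(), so they must be written as Python literals
+ # on the command line (hypothetical example):
+ #
+ #   ThriftHiveMetastore-remote -h localhost:9083 drop_table default tmp_t True
+ 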
+ if cmd == 'getMetaConf':
+   if len(args) != 1:
+     print('getMetaConf requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.getMetaConf(args[0],))
+ 
+ elif cmd == 'setMetaConf':
+   if len(args) != 2:
+     print('setMetaConf requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.setMetaConf(args[0],args[1],))
+ 
+ elif cmd == 'create_catalog':
+   if len(args) != 1:
+     print('create_catalog requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_catalog(eval(args[0]),))
+ 
+ elif cmd == 'alter_catalog':
+   if len(args) != 1:
+     print('alter_catalog requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.alter_catalog(eval(args[0]),))
+ 
+ elif cmd == 'get_catalog':
+   if len(args) != 1:
+     print('get_catalog requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_catalog(eval(args[0]),))
+ 
+ elif cmd == 'get_catalogs':
+   if len(args) != 0:
+     print('get_catalogs requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_catalogs())
+ 
+ elif cmd == 'drop_catalog':
+   if len(args) != 1:
+     print('drop_catalog requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_catalog(eval(args[0]),))
+ 
+ elif cmd == 'create_database':
+   if len(args) != 1:
+     print('create_database requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_database(eval(args[0]),))
+ 
+ elif cmd == 'get_database':
+   if len(args) != 1:
+     print('get_database requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_database(args[0],))
+ 
+ elif cmd == 'drop_database':
+   if len(args) != 3:
+     print('drop_database requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.drop_database(args[0],eval(args[1]),eval(args[2]),))
+ 
+ elif cmd == 'get_databases':
+   if len(args) != 1:
+     print('get_databases requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_databases(args[0],))
+ 
+ elif cmd == 'get_all_databases':
+   if len(args) != 0:
+     print('get_all_databases requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_all_databases())
+ 
+ elif cmd == 'alter_database':
+   if len(args) != 2:
+     print('alter_database requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.alter_database(args[0],eval(args[1]),))
+ 
+ elif cmd == 'get_type':
+   if len(args) != 1:
+     print('get_type requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_type(args[0],))
+ 
+ elif cmd == 'create_type':
+   if len(args) != 1:
+     print('create_type requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_type(eval(args[0]),))
+ 
+ elif cmd == 'drop_type':
+   if len(args) != 1:
+     print('drop_type requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_type(args[0],))
+ 
+ elif cmd == 'get_type_all':
+   if len(args) != 1:
+     print('get_type_all requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_type_all(args[0],))
+ 
+ elif cmd == 'get_fields':
+   if len(args) != 2:
+     print('get_fields requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_fields(args[0],args[1],))
+ 
+ elif cmd == 'get_fields_with_environment_context':
+   if len(args) != 3:
+     print('get_fields_with_environment_context requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_fields_with_environment_context(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'get_schema':
+   if len(args) != 2:
+     print('get_schema requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_schema(args[0],args[1],))
+ 
+ elif cmd == 'get_schema_with_environment_context':
+   if len(args) != 3:
+     print('get_schema_with_environment_context requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_schema_with_environment_context(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'create_table':
+   if len(args) != 1:
+     print('create_table requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_table(eval(args[0]),))
+ 
+ elif cmd == 'create_table_with_environment_context':
+   if len(args) != 2:
+     print('create_table_with_environment_context requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.create_table_with_environment_context(eval(args[0]),eval(args[1]),))
+ 
+ elif cmd == 'create_table_with_constraints':
+   if len(args) != 7:
+     print('create_table_with_constraints requires 7 args')
+     sys.exit(1)
+   pp.pprint(client.create_table_with_constraints(eval(args[0]),eval(args[1]),eval(args[2]),eval(args[3]),eval(args[4]),eval(args[5]),eval(args[6]),))
+ 
+ elif cmd == 'drop_constraint':
+   if len(args) != 1:
+     print('drop_constraint requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_constraint(eval(args[0]),))
+ 
+ elif cmd == 'add_primary_key':
+   if len(args) != 1:
+     print('add_primary_key requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_primary_key(eval(args[0]),))
+ 
+ elif cmd == 'add_foreign_key':
+   if len(args) != 1:
+     print('add_foreign_key requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_foreign_key(eval(args[0]),))
+ 
+ elif cmd == 'add_unique_constraint':
+   if len(args) != 1:
+     print('add_unique_constraint requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_unique_constraint(eval(args[0]),))
+ 
+ elif cmd == 'add_not_null_constraint':
+   if len(args) != 1:
+     print('add_not_null_constraint requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_not_null_constraint(eval(args[0]),))
+ 
+ elif cmd == 'add_default_constraint':
+   if len(args) != 1:
+     print('add_default_constraint requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_default_constraint(eval(args[0]),))
+ 
+ elif cmd == 'add_check_constraint':
+   if len(args) != 1:
+     print('add_check_constraint requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_check_constraint(eval(args[0]),))
+ 
+ elif cmd == 'drop_table':
+   if len(args) != 3:
+     print('drop_table requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.drop_table(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'drop_table_with_environment_context':
+   if len(args) != 4:
+     print('drop_table_with_environment_context requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.drop_table_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'truncate_table':
+   if len(args) != 3:
+     print('truncate_table requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.truncate_table(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'get_tables':
+   if len(args) != 2:
+     print('get_tables requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_tables(args[0],args[1],))
+ 
+ elif cmd == 'get_tables_by_type':
+   if len(args) != 3:
+     print('get_tables_by_type requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_tables_by_type(args[0],args[1],args[2],))
+ 
+ elif cmd == 'get_materialized_views_for_rewriting':
+   if len(args) != 1:
+     print('get_materialized_views_for_rewriting requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_materialized_views_for_rewriting(args[0],))
+ 
+ elif cmd == 'get_table_meta':
+   if len(args) != 3:
+     print('get_table_meta requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_table_meta(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'get_all_tables':
+   if len(args) != 1:
+     print('get_all_tables requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_all_tables(args[0],))
+ 
+ elif cmd == 'get_table':
+   if len(args) != 2:
+     print('get_table requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_table(args[0],args[1],))
+ 
+ elif cmd == 'get_table_objects_by_name':
+   if len(args) != 2:
+     print('get_table_objects_by_name requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_table_objects_by_name(args[0],eval(args[1]),))
+ 
+ elif cmd == 'get_table_req':
+   if len(args) != 1:
+     print('get_table_req requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_table_req(eval(args[0]),))
+ 
+ elif cmd == 'get_table_objects_by_name_req':
+   if len(args) != 1:
+     print('get_table_objects_by_name_req requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_table_objects_by_name_req(eval(args[0]),))
+ 
+ elif cmd == 'get_materialization_invalidation_info':
+   if len(args) != 2:
+     print('get_materialization_invalidation_info requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_materialization_invalidation_info(args[0],eval(args[1]),))
+ 
+ elif cmd == 'update_creation_metadata':
+   if len(args) != 4:
+     print('update_creation_metadata requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.update_creation_metadata(args[0],args[1],args[2],eval(args[3]),))
+ 
+ elif cmd == 'get_table_names_by_filter':
+   if len(args) != 3:
+     print('get_table_names_by_filter requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_table_names_by_filter(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'alter_table':
+   if len(args) != 3:
+     print('alter_table requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.alter_table(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'alter_table_with_environment_context':
+   if len(args) != 4:
+     print('alter_table_with_environment_context requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.alter_table_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'alter_table_with_cascade':
+   if len(args) != 4:
+     print('alter_table_with_cascade requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.alter_table_with_cascade(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'add_partition':
+   if len(args) != 1:
+     print('add_partition requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_partition(eval(args[0]),))
+ 
+ elif cmd == 'add_partition_with_environment_context':
+   if len(args) != 2:
+     print('add_partition_with_environment_context requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.add_partition_with_environment_context(eval(args[0]),eval(args[1]),))
+ 
+ elif cmd == 'add_partitions':
+   if len(args) != 1:
+     print('add_partitions requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_partitions(eval(args[0]),))
+ 
+ elif cmd == 'add_partitions_pspec':
+   if len(args) != 1:
+     print('add_partitions_pspec requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_partitions_pspec(eval(args[0]),))
+ 
+ elif cmd == 'append_partition':
+   if len(args) != 3:
+     print('append_partition requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.append_partition(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'add_partitions_req':
+   if len(args) != 1:
+     print('add_partitions_req requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_partitions_req(eval(args[0]),))
+ 
+ elif cmd == 'append_partition_with_environment_context':
+   if len(args) != 4:
+     print('append_partition_with_environment_context requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.append_partition_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'append_partition_by_name':
+   if len(args) != 3:
+     print('append_partition_by_name requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.append_partition_by_name(args[0],args[1],args[2],))
+ 
+ elif cmd == 'append_partition_by_name_with_environment_context':
+   if len(args) != 4:
+     print('append_partition_by_name_with_environment_context requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.append_partition_by_name_with_environment_context(args[0],args[1],args[2],eval(args[3]),))
+ 
+ elif cmd == 'drop_partition':
+   if len(args) != 4:
+     print('drop_partition requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.drop_partition(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'drop_partition_with_environment_context':
+   if len(args) != 5:
+     print('drop_partition_with_environment_context requires 5 args')
+     sys.exit(1)
+   pp.pprint(client.drop_partition_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),eval(args[4]),))
+ 
+ elif cmd == 'drop_partition_by_name':
+   if len(args) != 4:
+     print('drop_partition_by_name requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.drop_partition_by_name(args[0],args[1],args[2],eval(args[3]),))
+ 
+ elif cmd == 'drop_partition_by_name_with_environment_context':
+   if len(args) != 5:
+     print('drop_partition_by_name_with_environment_context requires 5 args')
+     sys.exit(1)
+   pp.pprint(client.drop_partition_by_name_with_environment_context(args[0],args[1],args[2],eval(args[3]),eval(args[4]),))
+ 
+ elif cmd == 'drop_partitions_req':
+   if len(args) != 1:
+     print('drop_partitions_req requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_partitions_req(eval(args[0]),))
+ 
+ elif cmd == 'get_partition':
+   if len(args) != 3:
+     print('get_partition requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_partition(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'exchange_partition':
+   if len(args) != 5:
+     print('exchange_partition requires 5 args')
+     sys.exit(1)
+   pp.pprint(client.exchange_partition(eval(args[0]),args[1],args[2],args[3],args[4],))
+ 
+ elif cmd == 'exchange_partitions':
+   if len(args) != 5:
+     print('exchange_partitions requires 5 args')
+     sys.exit(1)
+   pp.pprint(client.exchange_partitions(eval(args[0]),args[1],args[2],args[3],args[4],))
+ 
+ elif cmd == 'get_partition_with_auth':
+   if len(args) != 5:
+     print('get_partition_with_auth requires 5 args')
+     sys.exit(1)
+   pp.pprint(client.get_partition_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),))
+ 
+ elif cmd == 'get_partition_by_name':
+   if len(args) != 3:
+     print('get_partition_by_name requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_partition_by_name(args[0],args[1],args[2],))
+ 
+ elif cmd == 'get_partitions':
+   if len(args) != 3:
+     print('get_partitions requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_partitions(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'get_partitions_with_auth':
+   if len(args) != 5:
+     print('get_partitions_with_auth requires 5 args')
+     sys.exit(1)
+   pp.pprint(client.get_partitions_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),))
+ 
+ elif cmd == 'get_partitions_pspec':
+   if len(args) != 3:
+     print('get_partitions_pspec requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_partitions_pspec(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'get_partition_names':
+   if len(args) != 3:
+     print('get_partition_names requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_partition_names(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'get_partition_values':
+   if len(args) != 1:
+     print('get_partition_values requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_partition_values(eval(args[0]),))
+ 
+ elif cmd == 'get_partitions_ps':
+   if len(args) != 4:
+     print('get_partitions_ps requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.get_partitions_ps(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'get_partitions_ps_with_auth':
+   if len(args) != 6:
+     print('get_partitions_ps_with_auth requires 6 args')
+     sys.exit(1)
+   pp.pprint(client.get_partitions_ps_with_auth(args[0],args[1],eval(args[2]),eval(args[3]),args[4],eval(args[5]),))
+ 
+ elif cmd == 'get_partition_names_ps':
+   if len(args) != 4:
+     print('get_partition_names_ps requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'get_partitions_by_filter':
+   if len(args) != 4:
+     print('get_partitions_by_filter requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.get_partitions_by_filter(args[0],args[1],args[2],eval(args[3]),))
+ 
+ elif cmd == 'get_part_specs_by_filter':
+   if len(args) != 4:
+     print('get_part_specs_by_filter requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.get_part_specs_by_filter(args[0],args[1],args[2],eval(args[3]),))
+ 
+ elif cmd == 'get_partitions_by_expr':
+   if len(args) != 1:
+     print('get_partitions_by_expr requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_partitions_by_expr(eval(args[0]),))
+ 
+ elif cmd == 'get_num_partitions_by_filter':
+   if len(args) != 3:
+     print('get_num_partitions_by_filter requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_num_partitions_by_filter(args[0],args[1],args[2],))
+ 
+ elif cmd == 'get_partitions_by_names':
+   if len(args) != 3:
+     print('get_partitions_by_names requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'alter_partition':
+   if len(args) != 3:
+     print('alter_partition requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.alter_partition(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'alter_partitions':
+   if len(args) != 3:
+     print('alter_partitions requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.alter_partitions(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'alter_partitions_with_environment_context':
+   if len(args) != 4:
+     print('alter_partitions_with_environment_context requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.alter_partitions_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
++elif cmd == 'alter_partitions_with_environment_context_req':
++  if len(args) != 1:
++    print('alter_partitions_with_environment_context_req requires 1 args')
++    sys.exit(1)
++  pp.pprint(client.alter_partitions_with_environment_context_req(eval(args[0]),))
++
+ elif cmd == 'alter_partition_with_environment_context':
+   if len(args) != 4:
+     print('alter_partition_with_environment_context requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.alter_partition_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'rename_partition':
+   if len(args) != 4:
+     print('rename_partition requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.rename_partition(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'partition_name_has_valid_characters':
+   if len(args) != 2:
+     print('partition_name_has_valid_characters requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.partition_name_has_valid_characters(eval(args[0]),eval(args[1]),))
+ 
+ elif cmd == 'get_config_value':
+   if len(args) != 2:
+     print('get_config_value requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_config_value(args[0],args[1],))
+ 
+ elif cmd == 'partition_name_to_vals':
+   if len(args) != 1:
+     print('partition_name_to_vals requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.partition_name_to_vals(args[0],))
+ 
+ elif cmd == 'partition_name_to_spec':
+   if len(args) != 1:
+     print('partition_name_to_spec requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.partition_name_to_spec(args[0],))
+ 
+ elif cmd == 'markPartitionForEvent':
+   if len(args) != 4:
+     print('markPartitionForEvent requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.markPartitionForEvent(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'isPartitionMarkedForEvent':
+   if len(args) != 4:
+     print('isPartitionMarkedForEvent requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.isPartitionMarkedForEvent(args[0],args[1],eval(args[2]),eval(args[3]),))
+ 
+ elif cmd == 'get_primary_keys':
+   if len(args) != 1:
+     print('get_primary_keys requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_primary_keys(eval(args[0]),))
+ 
+ elif cmd == 'get_foreign_keys':
+   if len(args) != 1:
+     print('get_foreign_keys requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_foreign_keys(eval(args[0]),))
+ 
+ elif cmd == 'get_unique_constraints':
+   if len(args) != 1:
+     print('get_unique_constraints requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_unique_constraints(eval(args[0]),))
+ 
+ elif cmd == 'get_not_null_constraints':
+   if len(args) != 1:
+     print('get_not_null_constraints requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_not_null_constraints(eval(args[0]),))
+ 
+ elif cmd == 'get_default_constraints':
+   if len(args) != 1:
+     print('get_default_constraints requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_default_constraints(eval(args[0]),))
+ 
+ elif cmd == 'get_check_constraints':
+   if len(args) != 1:
+     print('get_check_constraints requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_check_constraints(eval(args[0]),))
+ 
+ elif cmd == 'update_table_column_statistics':
+   if len(args) != 1:
+     print('update_table_column_statistics requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.update_table_column_statistics(eval(args[0]),))
+ 
+ elif cmd == 'update_partition_column_statistics':
+   if len(args) != 1:
+     print('update_partition_column_statistics requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.update_partition_column_statistics(eval(args[0]),))
+ 
+ elif cmd == 'get_table_column_statistics':
+   if len(args) != 3:
+     print('get_table_column_statistics requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_table_column_statistics(args[0],args[1],args[2],))
+ 
+ elif cmd == 'get_partition_column_statistics':
+   if len(args) != 4:
+     print('get_partition_column_statistics requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.get_partition_column_statistics(args[0],args[1],args[2],args[3],))
+ 
+ elif cmd == 'get_table_statistics_req':
+   if len(args) != 1:
+     print('get_table_statistics_req requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_table_statistics_req(eval(args[0]),))
+ 
+ elif cmd == 'get_partitions_statistics_req':
+   if len(args) != 1:
+     print('get_partitions_statistics_req requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_partitions_statistics_req(eval(args[0]),))
+ 
+ elif cmd == 'get_aggr_stats_for':
+   if len(args) != 1:
+     print('get_aggr_stats_for requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_aggr_stats_for(eval(args[0]),))
+ 
+ elif cmd == 'set_aggr_stats_for':
+   if len(args) != 1:
+     print('set_aggr_stats_for requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.set_aggr_stats_for(eval(args[0]),))
+ 
+ elif cmd == 'delete_partition_column_statistics':
+   if len(args) != 4:
+     print('delete_partition_column_statistics requires 4 args')
+     sys.exit(1)
+   pp.pprint(client.delete_partition_column_statistics(args[0],args[1],args[2],args[3],))
+ 
+ elif cmd == 'delete_table_column_statistics':
+   if len(args) != 3:
+     print('delete_table_column_statistics requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.delete_table_column_statistics(args[0],args[1],args[2],))
+ 
+ elif cmd == 'create_function':
+   if len(args) != 1:
+     print('create_function requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_function(eval(args[0]),))
+ 
+ elif cmd == 'drop_function':
+   if len(args) != 2:
+     print('drop_function requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.drop_function(args[0],args[1],))
+ 
+ elif cmd == 'alter_function':
+   if len(args) != 3:
+     print('alter_function requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.alter_function(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'get_functions':
+   if len(args) != 2:
+     print('get_functions requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_functions(args[0],args[1],))
+ 
+ elif cmd == 'get_function':
+   if len(args) != 2:
+     print('get_function requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_function(args[0],args[1],))
+ 
+ elif cmd == 'get_all_functions':
+   if len(args) != 0:
+     print('get_all_functions requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_all_functions())
+ 
+ elif cmd == 'create_role':
+   if len(args) != 1:
+     print('create_role requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_role(eval(args[0]),))
+ 
+ elif cmd == 'drop_role':
+   if len(args) != 1:
+     print('drop_role requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_role(args[0],))
+ 
+ elif cmd == 'get_role_names':
+   if len(args) != 0:
+     print('get_role_names requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_role_names())
+ 
+ elif cmd == 'grant_role':
+   if len(args) != 6:
+     print('grant_role requires 6 args')
+     sys.exit(1)
+   pp.pprint(client.grant_role(args[0],args[1],eval(args[2]),args[3],eval(args[4]),eval(args[5]),))
+ 
+ elif cmd == 'revoke_role':
+   if len(args) != 3:
+     print('revoke_role requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.revoke_role(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'list_roles':
+   if len(args) != 2:
+     print('list_roles requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.list_roles(args[0],eval(args[1]),))
+ 
+ elif cmd == 'grant_revoke_role':
+   if len(args) != 1:
+     print('grant_revoke_role requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.grant_revoke_role(eval(args[0]),))
+ 
+ elif cmd == 'get_principals_in_role':
+   if len(args) != 1:
+     print('get_principals_in_role requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_principals_in_role(eval(args[0]),))
+ 
+ elif cmd == 'get_role_grants_for_principal':
+   if len(args) != 1:
+     print('get_role_grants_for_principal requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_role_grants_for_principal(eval(args[0]),))
+ 
+ elif cmd == 'get_privilege_set':
+   if len(args) != 3:
+     print('get_privilege_set requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_privilege_set(eval(args[0]),args[1],eval(args[2]),))
+ 
+ elif cmd == 'list_privileges':
+   if len(args) != 3:
+     print('list_privileges requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.list_privileges(args[0],eval(args[1]),eval(args[2]),))
+ 
+ elif cmd == 'grant_privileges':
+   if len(args) != 1:
+     print('grant_privileges requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.grant_privileges(eval(args[0]),))
+ 
+ elif cmd == 'revoke_privileges':
+   if len(args) != 1:
+     print('revoke_privileges requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.revoke_privileges(eval(args[0]),))
+ 
+ elif cmd == 'grant_revoke_privileges':
+   if len(args) != 1:
+     print('grant_revoke_privileges requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.grant_revoke_privileges(eval(args[0]),))
+ 
+ elif cmd == 'refresh_privileges':
+   if len(args) != 3:
+     print('refresh_privileges requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.refresh_privileges(eval(args[0]),args[1],eval(args[2]),))
+ 
+ elif cmd == 'set_ugi':
+   if len(args) != 2:
+     print('set_ugi requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.set_ugi(args[0],eval(args[1]),))
+ 
+ elif cmd == 'get_delegation_token':
+   if len(args) != 2:
+     print('get_delegation_token requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.get_delegation_token(args[0],args[1],))
+ 
+ elif cmd == 'renew_delegation_token':
+   if len(args) != 1:
+     print('renew_delegation_token requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.renew_delegation_token(args[0],))
+ 
+ elif cmd == 'cancel_delegation_token':
+   if len(args) != 1:
+     print('cancel_delegation_token requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.cancel_delegation_token(args[0],))
+ 
+ elif cmd == 'add_token':
+   if len(args) != 2:
+     print('add_token requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.add_token(args[0],args[1],))
+ 
+ elif cmd == 'remove_token':
+   if len(args) != 1:
+     print('remove_token requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.remove_token(args[0],))
+ 
+ elif cmd == 'get_token':
+   if len(args) != 1:
+     print('get_token requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_token(args[0],))
+ 
+ elif cmd == 'get_all_token_identifiers':
+   if len(args) != 0:
+     print('get_all_token_identifiers requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_all_token_identifiers())
+ 
+ elif cmd == 'add_master_key':
+   if len(args) != 1:
+     print('add_master_key requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_master_key(args[0],))
+ 
+ elif cmd == 'update_master_key':
+   if len(args) != 2:
+     print('update_master_key requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.update_master_key(eval(args[0]),args[1],))
+ 
+ elif cmd == 'remove_master_key':
+   if len(args) != 1:
+     print('remove_master_key requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.remove_master_key(eval(args[0]),))
+ 
+ elif cmd == 'get_master_keys':
+   if len(args) != 0:
+     print('get_master_keys requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_master_keys())
+ 
+ elif cmd == 'get_open_txns':
+   if len(args) != 0:
+     print('get_open_txns requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_open_txns())
+ 
+ elif cmd == 'get_open_txns_info':
+   if len(args) != 0:
+     print('get_open_txns_info requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_open_txns_info())
+ 
+ elif cmd == 'open_txns':
+   if len(args) != 1:
+     print('open_txns requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.open_txns(eval(args[0]),))
+ 
+ elif cmd == 'abort_txn':
+   if len(args) != 1:
+     print('abort_txn requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.abort_txn(eval(args[0]),))
+ 
+ elif cmd == 'abort_txns':
+   if len(args) != 1:
+     print('abort_txns requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.abort_txns(eval(args[0]),))
+ 
+ elif cmd == 'commit_txn':
+   if len(args) != 1:
+     print('commit_txn requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.commit_txn(eval(args[0]),))
+ 
+ elif cmd == 'repl_tbl_writeid_state':
+   if len(args) != 1:
+     print('repl_tbl_writeid_state requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.repl_tbl_writeid_state(eval(args[0]),))
+ 
+ elif cmd == 'get_valid_write_ids':
+   if len(args) != 1:
+     print('get_valid_write_ids requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_valid_write_ids(eval(args[0]),))
+ 
+ elif cmd == 'allocate_table_write_ids':
+   if len(args) != 1:
+     print('allocate_table_write_ids requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.allocate_table_write_ids(eval(args[0]),))
+ 
+ elif cmd == 'lock':
+   if len(args) != 1:
+     print('lock requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.lock(eval(args[0]),))
+ 
+ elif cmd == 'check_lock':
+   if len(args) != 1:
+     print('check_lock requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.check_lock(eval(args[0]),))
+ 
+ elif cmd == 'unlock':
+   if len(args) != 1:
+     print('unlock requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.unlock(eval(args[0]),))
+ 
+ elif cmd == 'show_locks':
+   if len(args) != 1:
+     print('show_locks requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.show_locks(eval(args[0]),))
+ 
+ elif cmd == 'heartbeat':
+   if len(args) != 1:
+     print('heartbeat requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.heartbeat(eval(args[0]),))
+ 
+ elif cmd == 'heartbeat_txn_range':
+   if len(args) != 1:
+     print('heartbeat_txn_range requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.heartbeat_txn_range(eval(args[0]),))
+ 
+ elif cmd == 'compact':
+   if len(args) != 1:
+     print('compact requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.compact(eval(args[0]),))
+ 
+ elif cmd == 'compact2':
+   if len(args) != 1:
+     print('compact2 requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.compact2(eval(args[0]),))
+ 
+ elif cmd == 'show_compact':
+   if len(args) != 1:
+     print('show_compact requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.show_compact(eval(args[0]),))
+ 
+ elif cmd == 'add_dynamic_partitions':
+   if len(args) != 1:
+     print('add_dynamic_partitions requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_dynamic_partitions(eval(args[0]),))
+ 
+ elif cmd == 'get_next_notification':
+   if len(args) != 1:
+     print('get_next_notification requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_next_notification(eval(args[0]),))
+ 
+ elif cmd == 'get_current_notificationEventId':
+   if len(args) != 0:
+     print('get_current_notificationEventId requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_current_notificationEventId())
+ 
+ elif cmd == 'get_notification_events_count':
+   if len(args) != 1:
+     print('get_notification_events_count requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_notification_events_count(eval(args[0]),))
+ 
+ elif cmd == 'fire_listener_event':
+   if len(args) != 1:
+     print('fire_listener_event requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.fire_listener_event(eval(args[0]),))
+ 
+ elif cmd == 'flushCache':
+   if len(args) != 0:
+     print('flushCache requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.flushCache())
+ 
+ elif cmd == 'add_write_notification_log':
+   if len(args) != 1:
+     print('add_write_notification_log requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_write_notification_log(eval(args[0]),))
+ 
+ elif cmd == 'cm_recycle':
+   if len(args) != 1:
+     print('cm_recycle requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.cm_recycle(eval(args[0]),))
+ 
+ elif cmd == 'get_file_metadata_by_expr':
+   if len(args) != 1:
+     print('get_file_metadata_by_expr requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_file_metadata_by_expr(eval(args[0]),))
+ 
+ elif cmd == 'get_file_metadata':
+   if len(args) != 1:
+     print('get_file_metadata requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_file_metadata(eval(args[0]),))
+ 
+ elif cmd == 'put_file_metadata':
+   if len(args) != 1:
+     print('put_file_metadata requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.put_file_metadata(eval(args[0]),))
+ 
+ elif cmd == 'clear_file_metadata':
+   if len(args) != 1:
+     print('clear_file_metadata requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.clear_file_metadata(eval(args[0]),))
+ 
+ elif cmd == 'cache_file_metadata':
+   if len(args) != 1:
+     print('cache_file_metadata requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.cache_file_metadata(eval(args[0]),))
+ 
+ elif cmd == 'get_metastore_db_uuid':
+   if len(args) != 0:
+     print('get_metastore_db_uuid requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.get_metastore_db_uuid())
+ 
+ elif cmd == 'create_resource_plan':
+   if len(args) != 1:
+     print('create_resource_plan requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_resource_plan(eval(args[0]),))
+ 
+ elif cmd == 'get_resource_plan':
+   if len(args) != 1:
+     print('get_resource_plan requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_resource_plan(eval(args[0]),))
+ 
+ elif cmd == 'get_active_resource_plan':
+   if len(args) != 1:
+     print('get_active_resource_plan requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_active_resource_plan(eval(args[0]),))
+ 
+ elif cmd == 'get_all_resource_plans':
+   if len(args) != 1:
+     print('get_all_resource_plans requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_all_resource_plans(eval(args[0]),))
+ 
+ elif cmd == 'alter_resource_plan':
+   if len(args) != 1:
+     print('alter_resource_plan requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.alter_resource_plan(eval(args[0]),))
+ 
+ elif cmd == 'validate_resource_plan':
+   if len(args) != 1:
+     print('validate_resource_plan requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.validate_resource_plan(eval(args[0]),))
+ 
+ elif cmd == 'drop_resource_plan':
+   if len(args) != 1:
+     print('drop_resource_plan requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_resource_plan(eval(args[0]),))
+ 
+ elif cmd == 'create_wm_trigger':
+   if len(args) != 1:
+     print('create_wm_trigger requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_wm_trigger(eval(args[0]),))
+ 
+ elif cmd == 'alter_wm_trigger':
+   if len(args) != 1:
+     print('alter_wm_trigger requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.alter_wm_trigger(eval(args[0]),))
+ 
+ elif cmd == 'drop_wm_trigger':
+   if len(args) != 1:
+     print('drop_wm_trigger requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_wm_trigger(eval(args[0]),))
+ 
+ elif cmd == 'get_triggers_for_resourceplan':
+   if len(args) != 1:
+     print('get_triggers_for_resourceplan requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_triggers_for_resourceplan(eval(args[0]),))
+ 
+ elif cmd == 'create_wm_pool':
+   if len(args) != 1:
+     print('create_wm_pool requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_wm_pool(eval(args[0]),))
+ 
+ elif cmd == 'alter_wm_pool':
+   if len(args) != 1:
+     print('alter_wm_pool requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.alter_wm_pool(eval(args[0]),))
+ 
+ elif cmd == 'drop_wm_pool':
+   if len(args) != 1:
+     print('drop_wm_pool requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_wm_pool(eval(args[0]),))
+ 
+ elif cmd == 'create_or_update_wm_mapping':
+   if len(args) != 1:
+     print('create_or_update_wm_mapping requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_or_update_wm_mapping(eval(args[0]),))
+ 
+ elif cmd == 'drop_wm_mapping':
+   if len(args) != 1:
+     print('drop_wm_mapping requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_wm_mapping(eval(args[0]),))
+ 
+ elif cmd == 'create_or_drop_wm_trigger_to_pool_mapping':
+   if len(args) != 1:
+     print('create_or_drop_wm_trigger_to_pool_mapping requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_or_drop_wm_trigger_to_pool_mapping(eval(args[0]),))
+ 
+ elif cmd == 'create_ischema':
+   if len(args) != 1:
+     print('create_ischema requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.create_ischema(eval(args[0]),))
+ 
+ elif cmd == 'alter_ischema':
+   if len(args) != 1:
+     print('alter_ischema requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.alter_ischema(eval(args[0]),))
+ 
+ elif cmd == 'get_ischema':
+   if len(args) != 1:
+     print('get_ischema requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_ischema(eval(args[0]),))
+ 
+ elif cmd == 'drop_ischema':
+   if len(args) != 1:
+     print('drop_ischema requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_ischema(eval(args[0]),))
+ 
+ elif cmd == 'add_schema_version':
+   if len(args) != 1:
+     print('add_schema_version requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_schema_version(eval(args[0]),))
+ 
+ elif cmd == 'get_schema_version':
+   if len(args) != 1:
+     print('get_schema_version requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_schema_version(eval(args[0]),))
+ 
+ elif cmd == 'get_schema_latest_version':
+   if len(args) != 1:
+     print('get_schema_latest_version requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_schema_latest_version(eval(args[0]),))
+ 
+ elif cmd == 'get_schema_all_versions':
+   if len(args) != 1:
+     print('get_schema_all_versions requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_schema_all_versions(eval(args[0]),))
+ 
+ elif cmd == 'drop_schema_version':
+   if len(args) != 1:
+     print('drop_schema_version requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.drop_schema_version(eval(args[0]),))
+ 
+ elif cmd == 'get_schemas_by_cols':
+   if len(args) != 1:
+     print('get_schemas_by_cols requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_schemas_by_cols(eval(args[0]),))
+ 
+ elif cmd == 'map_schema_version_to_serde':
+   if len(args) != 1:
+     print('map_schema_version_to_serde requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.map_schema_version_to_serde(eval(args[0]),))
+ 
+ elif cmd == 'set_schema_version_state':
+   if len(args) != 1:
+     print('set_schema_version_state requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.set_schema_version_state(eval(args[0]),))
+ 
+ elif cmd == 'add_serde':
+   if len(args) != 1:
+     print('add_serde requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_serde(eval(args[0]),))
+ 
+ elif cmd == 'get_serde':
+   if len(args) != 1:
+     print('get_serde requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_serde(eval(args[0]),))
+ 
+ elif cmd == 'get_lock_materialization_rebuild':
+   if len(args) != 3:
+     print('get_lock_materialization_rebuild requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.get_lock_materialization_rebuild(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'heartbeat_lock_materialization_rebuild':
+   if len(args) != 3:
+     print('heartbeat_lock_materialization_rebuild requires 3 args')
+     sys.exit(1)
+   pp.pprint(client.heartbeat_lock_materialization_rebuild(args[0],args[1],eval(args[2]),))
+ 
+ elif cmd == 'add_runtime_stats':
+   if len(args) != 1:
+     print('add_runtime_stats requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.add_runtime_stats(eval(args[0]),))
+ 
+ elif cmd == 'get_runtime_stats':
+   if len(args) != 1:
+     print('get_runtime_stats requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.get_runtime_stats(eval(args[0]),))
+ 
+ elif cmd == 'getName':
+   if len(args) != 0:
+     print('getName requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getName())
+ 
+ elif cmd == 'getVersion':
+   if len(args) != 0:
+     print('getVersion requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getVersion())
+ 
+ elif cmd == 'getStatus':
+   if len(args) != 0:
+     print('getStatus requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getStatus())
+ 
+ elif cmd == 'getStatusDetails':
+   if len(args) != 0:
+     print('getStatusDetails requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getStatusDetails())
+ 
+ elif cmd == 'getCounters':
+   if len(args) != 0:
+     print('getCounters requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getCounters())
+ 
+ elif cmd == 'getCounter':
+   if len(args) != 1:
+     print('getCounter requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.getCounter(args[0],))
+ 
+ elif cmd == 'setOption':
+   if len(args) != 2:
+     print('setOption requires 2 args')
+     sys.exit(1)
+   pp.pprint(client.setOption(args[0],args[1],))
+ 
+ elif cmd == 'getOption':
+   if len(args) != 1:
+     print('getOption requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.getOption(args[0],))
+ 
+ elif cmd == 'getOptions':
+   if len(args) != 0:
+     print('getOptions requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.getOptions())
+ 
+ elif cmd == 'getCpuProfile':
+   if len(args) != 1:
+     print('getCpuProfile requires 1 args')
+     sys.exit(1)
+   pp.pprint(client.getCpuProfile(eval(args[0]),))
+ 
+ elif cmd == 'aliveSince':
+   if len(args) != 0:
+     print('aliveSince requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.aliveSince())
+ 
+ elif cmd == 'reinitialize':
+   if len(args) != 0:
+     print('reinitialize requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.reinitialize())
+ 
+ elif cmd == 'shutdown':
+   if len(args) != 0:
+     print('shutdown requires 0 args')
+     sys.exit(1)
+   pp.pprint(client.shutdown())
+ 
+ else:
+   print('Unrecognized method %s' % cmd)
+   sys.exit(1)
+ 
+ transport.close()

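The branches above assume that boilerplate earlier in the same generated script (outside this hunk) has parsed the command line, opened `transport`, and bound `client` to the generated Thrift stub. For orientation, a minimal sketch of driving the same service directly from Python instead of through this CLI; the host, port, and module path are assumptions for illustration (the generated Python package is conventionally `hive_metastore`), not part of the patch:

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore  # generated module; name may differ

    # Connect to a metastore assumed to be listening on localhost:9083.
    sock = TSocket.TSocket('localhost', 9083)
    transport = TTransport.TBufferedTransport(sock)
    client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    print(client.get_all_functions())  # zero-argument call, as in the branch above
    transport.close()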

[61/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
index 0000000,968f4a4..29d545a
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mysql/hive-schema-4.0.0.mysql.sql
@@@ -1,0 -1,1208 +1,1210 @@@
+ -- MySQL dump 10.13  Distrib 5.5.25, for osx10.6 (i386)
+ --
+ -- Host: localhost    Database: test
+ -- ------------------------------------------------------
+ -- Server version	5.5.25
+ 
+ /*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+ /*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+ /*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+ /*!40101 SET NAMES utf8 */;
+ /*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
+ /*!40103 SET TIME_ZONE='+00:00' */;
+ /*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+ /*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+ /*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+ /*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+ 
+ --
+ -- Table structure for table `BUCKETING_COLS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `BUCKETING_COLS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `BUCKET_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+   KEY `BUCKETING_COLS_N49` (`SD_ID`),
+   CONSTRAINT `BUCKETING_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `CDS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `CDS` (
+   `CD_ID` bigint(20) NOT NULL,
+   PRIMARY KEY (`CD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `COLUMNS_V2`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `COLUMNS_V2` (
+   `CD_ID` bigint(20) NOT NULL,
+   `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TYPE_NAME` MEDIUMTEXT DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`CD_ID`,`COLUMN_NAME`),
+   KEY `COLUMNS_V2_N49` (`CD_ID`),
+   CONSTRAINT `COLUMNS_V2_FK1` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `DATABASE_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `DATABASE_PARAMS` (
+   `DB_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(180) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`DB_ID`,`PARAM_KEY`),
+   KEY `DATABASE_PARAMS_N49` (`DB_ID`),
+   CONSTRAINT `DATABASE_PARAMS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ CREATE TABLE `CTLGS` (
+     `CTLG_ID` BIGINT PRIMARY KEY,
+     `NAME` VARCHAR(256),
+     `DESC` VARCHAR(4000),
+     `LOCATION_URI` VARCHAR(4000) NOT NULL,
+     UNIQUE KEY `UNIQUE_CATALOG` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ 
+ --
+ -- Table structure for table `DBS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `DBS` (
+   `DB_ID` bigint(20) NOT NULL,
+   `DESC` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DB_LOCATION_URI` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `CTLG_NAME` varchar(256) NOT NULL,
+   PRIMARY KEY (`DB_ID`),
+   UNIQUE KEY `UNIQUE_DATABASE` (`NAME`, `CTLG_NAME`),
+   CONSTRAINT `CTLG_FK1` FOREIGN KEY (`CTLG_NAME`) REFERENCES `CTLGS` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
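Two pieces of the catalog feature land together in this schema: the new CTLGS table above holds one row per catalog, and DBS carries a NOT NULL CTLG_NAME column whose CTLG_FK1 constraint points at CTLGS(NAME), so every database row must belong to an existing catalog. A hedged sketch of verifying that relationship from Python over a DB-API driver; the driver choice and connection parameters are illustrative only, not from the patch:

    import mysql.connector  # assumption: mysql-connector-python is installed

    conn = mysql.connector.connect(host='localhost', user='hive',
                                   password='hive', database='metastore')
    cur = conn.cursor()
    # CTLG_FK1 guarantees this inner join drops no DBS rows.
    cur.execute("SELECT d.NAME, d.CTLG_NAME FROM DBS d "
                "JOIN CTLGS c ON d.CTLG_NAME = c.NAME")
    for db_name, catalog in cur:
        print(db_name, catalog)
    conn.close()
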
+ --
+ -- Table structure for table `DB_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `DB_PRIVS` (
+   `DB_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `DB_ID` bigint(20) DEFAULT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DB_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`DB_GRANT_ID`),
+   UNIQUE KEY `DBPRIVILEGEINDEX` (`AUTHORIZER`,`DB_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`DB_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `DB_PRIVS_N49` (`DB_ID`),
+   CONSTRAINT `DB_PRIVS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `GLOBAL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `GLOBAL_PRIVS` (
+   `USER_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `USER_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`USER_GRANT_ID`),
+   UNIQUE KEY `GLOBALPRIVILEGEINDEX` (`AUTHORIZER`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`USER_PRIV`,`GRANTOR`,`GRANTOR_TYPE`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `IDXS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `IDXS` (
+   `INDEX_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `DEFERRED_REBUILD` bit(1) NOT NULL,
+   `INDEX_HANDLER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INDEX_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INDEX_TBL_ID` bigint(20) DEFAULT NULL,
+   `LAST_ACCESS_TIME` int(11) NOT NULL,
+   `ORIG_TBL_ID` bigint(20) DEFAULT NULL,
+   `SD_ID` bigint(20) DEFAULT NULL,
+   PRIMARY KEY (`INDEX_ID`),
+   UNIQUE KEY `UNIQUEINDEX` (`INDEX_NAME`,`ORIG_TBL_ID`),
+   KEY `IDXS_N51` (`SD_ID`),
+   KEY `IDXS_N50` (`INDEX_TBL_ID`),
+   KEY `IDXS_N49` (`ORIG_TBL_ID`),
+   CONSTRAINT `IDXS_FK1` FOREIGN KEY (`ORIG_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+   CONSTRAINT `IDXS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+   CONSTRAINT `IDXS_FK3` FOREIGN KEY (`INDEX_TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `INDEX_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `INDEX_PARAMS` (
+   `INDEX_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`INDEX_ID`,`PARAM_KEY`),
+   KEY `INDEX_PARAMS_N49` (`INDEX_ID`),
+   CONSTRAINT `INDEX_PARAMS_FK1` FOREIGN KEY (`INDEX_ID`) REFERENCES `IDXS` (`INDEX_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `NUCLEUS_TABLES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `NUCLEUS_TABLES` (
+   `CLASS_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TABLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TYPE` varchar(4) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `OWNER` varchar(2) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `VERSION` varchar(20) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `INTERFACE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`CLASS_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITIONS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITIONS` (
+   `PART_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `LAST_ACCESS_TIME` int(11) NOT NULL,
+   `PART_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SD_ID` bigint(20) DEFAULT NULL,
+   `TBL_ID` bigint(20) DEFAULT NULL,
++  `WRITE_ID` bigint(20) DEFAULT 0,
+   PRIMARY KEY (`PART_ID`),
+   UNIQUE KEY `UNIQUEPARTITION` (`PART_NAME`,`TBL_ID`),
+   KEY `PARTITIONS_N49` (`TBL_ID`),
+   KEY `PARTITIONS_N50` (`SD_ID`),
+   CONSTRAINT `PARTITIONS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`),
+   CONSTRAINT `PARTITIONS_FK2` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
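The single `++` line in PARTITIONS (`WRITE_ID` bigint(20) DEFAULT 0) is combined-diff notation for a line present in the merge result but in neither parent, i.e. it is what the txn-stats branch itself adds to this table; TBLS gains the same column further down. Because of the DEFAULT, rows that predate the column read back as write id 0 with no backfill required. A small illustrative query in the same DB-API style as the catalog sketch above (names and limits are arbitrary):

    import mysql.connector  # assumption, as in the catalog sketch

    conn = mysql.connector.connect(host='localhost', user='hive',
                                   password='hive', database='metastore')
    cur = conn.cursor()
    # Partitions never touched by a transactional write keep WRITE_ID = 0.
    cur.execute("SELECT PART_NAME, WRITE_ID FROM PARTITIONS LIMIT 5")
    for part_name, write_id in cur:
        print(part_name, write_id)
    conn.close()
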
+ --
+ -- Table structure for table `PARTITION_EVENTS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_EVENTS` (
+   `PART_NAME_ID` bigint(20) NOT NULL,
+   `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `EVENT_TIME` bigint(20) NOT NULL,
+   `EVENT_TYPE` int(11) NOT NULL,
+   `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_NAME_ID`),
+   KEY `PARTITIONEVENTINDEX` (`PARTITION_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_KEYS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_KEYS` (
+   `TBL_ID` bigint(20) NOT NULL,
+   `PKEY_COMMENT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PKEY_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PKEY_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`TBL_ID`,`PKEY_NAME`),
+   KEY `PARTITION_KEYS_N49` (`TBL_ID`),
+   CONSTRAINT `PARTITION_KEYS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_KEY_VALS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_KEY_VALS` (
+   `PART_ID` bigint(20) NOT NULL,
+   `PART_KEY_VAL` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`PART_ID`,`INTEGER_IDX`),
+   KEY `PARTITION_KEY_VALS_N49` (`PART_ID`),
+   CONSTRAINT `PARTITION_KEY_VALS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PARTITION_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PARTITION_PARAMS` (
+   `PART_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_ID`,`PARAM_KEY`),
+   KEY `PARTITION_PARAMS_N49` (`PART_ID`),
+   CONSTRAINT `PARTITION_PARAMS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PART_COL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PART_COL_PRIVS` (
+   `PART_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_ID` bigint(20) DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_COLUMN_GRANT_ID`),
+   KEY `PART_COL_PRIVS_N49` (`PART_ID`),
+   KEY `PARTITIONCOLUMNPRIVILEGEINDEX` (`AUTHORIZER`,`PART_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   CONSTRAINT `PART_COL_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `PART_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `PART_PRIVS` (
+   `PART_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_ID` bigint(20) DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PART_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`PART_GRANT_ID`),
+   KEY `PARTPRIVILEGEINDEX` (`AUTHORIZER`,`PART_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`PART_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `PART_PRIVS_N49` (`PART_ID`),
+   CONSTRAINT `PART_PRIVS_FK1` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `ROLES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `ROLES` (
+   `ROLE_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `OWNER_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `ROLE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`ROLE_ID`),
+   UNIQUE KEY `ROLEENTITYINDEX` (`ROLE_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `ROLE_MAP`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `ROLE_MAP` (
+   `ROLE_GRANT_ID` bigint(20) NOT NULL,
+   `ADD_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `ROLE_ID` bigint(20) DEFAULT NULL,
+   PRIMARY KEY (`ROLE_GRANT_ID`),
+   UNIQUE KEY `USERROLEMAPINDEX` (`PRINCIPAL_NAME`,`ROLE_ID`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `ROLE_MAP_N49` (`ROLE_ID`),
+   CONSTRAINT `ROLE_MAP_FK1` FOREIGN KEY (`ROLE_ID`) REFERENCES `ROLES` (`ROLE_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SDS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SDS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `CD_ID` bigint(20) DEFAULT NULL,
+   `INPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `IS_COMPRESSED` bit(1) NOT NULL,
+   `IS_STOREDASSUBDIRECTORIES` bit(1) NOT NULL,
+   `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `NUM_BUCKETS` int(11) NOT NULL,
+   `OUTPUT_FORMAT` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SERDE_ID` bigint(20) DEFAULT NULL,
+   PRIMARY KEY (`SD_ID`),
+   KEY `SDS_N49` (`SERDE_ID`),
+   KEY `SDS_N50` (`CD_ID`),
+   CONSTRAINT `SDS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+   CONSTRAINT `SDS_FK2` FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SD_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SD_PARAMS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`SD_ID`,`PARAM_KEY`),
+   KEY `SD_PARAMS_N49` (`SD_ID`),
+   CONSTRAINT `SD_PARAMS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SEQUENCE_TABLE`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SEQUENCE_TABLE` (
+   `SEQUENCE_NAME` varchar(255) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `NEXT_VAL` bigint(20) NOT NULL,
+   PRIMARY KEY (`SEQUENCE_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MNotificationLog', 1);
+ 
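The INSERT above seeds SEQUENCE_TABLE so that notification-log ids start at 1; each allocation is a read-increment-write against that single row, keyed by the DataNucleus model class name. A conceptual Python sketch of that pattern (the metastore performs this through DataNucleus, not hand-written SQL; `cur` is any DB-API cursor on the metastore database):

    def next_notification_id(cur):
        # Read the current value, then advance it; FOR UPDATE serializes
        # concurrent allocators on InnoDB. Conceptual sketch only.
        seq = 'org.apache.hadoop.hive.metastore.model.MNotificationLog'
        cur.execute("SELECT NEXT_VAL FROM SEQUENCE_TABLE "
                    "WHERE SEQUENCE_NAME = %s FOR UPDATE", (seq,))
        (next_val,) = cur.fetchone()
        cur.execute("UPDATE SEQUENCE_TABLE SET NEXT_VAL = %s "
                    "WHERE SEQUENCE_NAME = %s", (next_val + 1, seq))
        return next_val
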
+ --
+ -- Table structure for table `SERDES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SERDES` (
+   `SERDE_ID` bigint(20) NOT NULL,
+   `NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SLIB` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DESCRIPTION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `DESERIALIZER_CLASS` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `SERDE_TYPE` integer,
+   PRIMARY KEY (`SERDE_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SERDE_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SERDE_PARAMS` (
+   `SERDE_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`SERDE_ID`,`PARAM_KEY`),
+   KEY `SERDE_PARAMS_N49` (`SERDE_ID`),
+   CONSTRAINT `SERDE_PARAMS_FK1` FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_COL_NAMES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_COL_NAMES` (
+   `SD_ID` bigint(20) NOT NULL,
+   `SKEWED_COL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+   KEY `SKEWED_COL_NAMES_N49` (`SD_ID`),
+   CONSTRAINT `SKEWED_COL_NAMES_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_COL_VALUE_LOC_MAP`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_COL_VALUE_LOC_MAP` (
+   `SD_ID` bigint(20) NOT NULL,
+   `STRING_LIST_ID_KID` bigint(20) NOT NULL,
+   `LOCATION` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`SD_ID`,`STRING_LIST_ID_KID`),
+   KEY `SKEWED_COL_VALUE_LOC_MAP_N49` (`STRING_LIST_ID_KID`),
+   KEY `SKEWED_COL_VALUE_LOC_MAP_N50` (`SD_ID`),
+   CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK2` FOREIGN KEY (`STRING_LIST_ID_KID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+   CONSTRAINT `SKEWED_COL_VALUE_LOC_MAP_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_STRING_LIST`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST` (
+   `STRING_LIST_ID` bigint(20) NOT NULL,
+   PRIMARY KEY (`STRING_LIST_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_STRING_LIST_VALUES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_STRING_LIST_VALUES` (
+   `STRING_LIST_ID` bigint(20) NOT NULL,
+   `STRING_LIST_VALUE` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`STRING_LIST_ID`,`INTEGER_IDX`),
+   KEY `SKEWED_STRING_LIST_VALUES_N49` (`STRING_LIST_ID`),
+   CONSTRAINT `SKEWED_STRING_LIST_VALUES_FK1` FOREIGN KEY (`STRING_LIST_ID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SKEWED_VALUES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SKEWED_VALUES` (
+   `SD_ID_OID` bigint(20) NOT NULL,
+   `STRING_LIST_ID_EID` bigint(20) NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID_OID`,`INTEGER_IDX`),
+   KEY `SKEWED_VALUES_N50` (`SD_ID_OID`),
+   KEY `SKEWED_VALUES_N49` (`STRING_LIST_ID_EID`),
+   CONSTRAINT `SKEWED_VALUES_FK2` FOREIGN KEY (`STRING_LIST_ID_EID`) REFERENCES `SKEWED_STRING_LIST` (`STRING_LIST_ID`),
+   CONSTRAINT `SKEWED_VALUES_FK1` FOREIGN KEY (`SD_ID_OID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `SORT_COLS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `SORT_COLS` (
+   `SD_ID` bigint(20) NOT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `ORDER` int(11) NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`SD_ID`,`INTEGER_IDX`),
+   KEY `SORT_COLS_N49` (`SD_ID`),
+   CONSTRAINT `SORT_COLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TABLE_PARAMS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TABLE_PARAMS` (
+   `TBL_ID` bigint(20) NOT NULL,
+   `PARAM_KEY` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `PARAM_VALUE` MEDIUMTEXT CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TBL_ID`,`PARAM_KEY`),
+   KEY `TABLE_PARAMS_N49` (`TBL_ID`),
+   CONSTRAINT `TABLE_PARAMS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `MV_CREATION_METADATA`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `MV_CREATION_METADATA` (
+   `MV_CREATION_METADATA_ID` bigint(20) NOT NULL,
+   `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `TXN_LIST` TEXT DEFAULT NULL,
+   PRIMARY KEY (`MV_CREATION_METADATA_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ CREATE INDEX MV_UNIQUE_TABLE ON MV_CREATION_METADATA (TBL_NAME, DB_NAME) USING BTREE;
+ 
+ --
+ -- Table structure for table `TBLS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TBLS` (
+   `TBL_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `DB_ID` bigint(20) DEFAULT NULL,
+   `LAST_ACCESS_TIME` int(11) NOT NULL,
+   `OWNER` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `OWNER_TYPE` varchar(10) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `RETENTION` int(11) NOT NULL,
+   `SD_ID` bigint(20) DEFAULT NULL,
+   `TBL_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `VIEW_EXPANDED_TEXT` mediumtext,
+   `VIEW_ORIGINAL_TEXT` mediumtext,
+   `IS_REWRITE_ENABLED` bit(1) NOT NULL DEFAULT 0,
++  `WRITE_ID` bigint(20) DEFAULT 0,
+   PRIMARY KEY (`TBL_ID`),
+   UNIQUE KEY `UNIQUETABLE` (`TBL_NAME`,`DB_ID`),
+   KEY `TBLS_N50` (`SD_ID`),
+   KEY `TBLS_N49` (`DB_ID`),
+   CONSTRAINT `TBLS_FK1` FOREIGN KEY (`SD_ID`) REFERENCES `SDS` (`SD_ID`),
+   CONSTRAINT `TBLS_FK2` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `MV_TABLES_USED`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `MV_TABLES_USED` (
+   `MV_CREATION_METADATA_ID` bigint(20) NOT NULL,
+   `TBL_ID` bigint(20) NOT NULL,
+   CONSTRAINT `MV_TABLES_USED_FK1` FOREIGN KEY (`MV_CREATION_METADATA_ID`) REFERENCES `MV_CREATION_METADATA` (`MV_CREATION_METADATA_ID`),
+   CONSTRAINT `MV_TABLES_USED_FK2` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TBL_COL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TBL_COL_PRIVS` (
+   `TBL_COLUMN_GRANT_ID` bigint(20) NOT NULL,
+   `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_COL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_ID` bigint(20) DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TBL_COLUMN_GRANT_ID`),
+   KEY `TABLECOLUMNPRIVILEGEINDEX` (`AUTHORIZER`,`TBL_ID`,`COLUMN_NAME`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_COL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   KEY `TBL_COL_PRIVS_N49` (`TBL_ID`),
+   CONSTRAINT `TBL_COL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TBL_PRIVS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TBL_PRIVS` (
+   `TBL_GRANT_ID` bigint(20) NOT NULL,
+   `CREATE_TIME` int(11) NOT NULL,
+   `GRANT_OPTION` smallint(6) NOT NULL,
+   `GRANTOR` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `GRANTOR_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `PRINCIPAL_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_PRIV` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TBL_ID` bigint(20) DEFAULT NULL,
+   `AUTHORIZER` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TBL_GRANT_ID`),
+   KEY `TBL_PRIVS_N49` (`TBL_ID`),
+   KEY `TABLEPRIVILEGEINDEX` (`AUTHORIZER`,`TBL_ID`,`PRINCIPAL_NAME`,`PRINCIPAL_TYPE`,`TBL_PRIV`,`GRANTOR`,`GRANTOR_TYPE`),
+   CONSTRAINT `TBL_PRIVS_FK1` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TAB_COL_STATS`
+ --
+ CREATE TABLE IF NOT EXISTS `TAB_COL_STATS` (
+  `CS_ID` bigint(20) NOT NULL,
+  `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TBL_ID` bigint(20) NOT NULL,
+  `LONG_LOW_VALUE` bigint(20),
+  `LONG_HIGH_VALUE` bigint(20),
+  `DOUBLE_HIGH_VALUE` double(53,4),
+  `DOUBLE_LOW_VALUE` double(53,4),
+  `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `NUM_NULLS` bigint(20) NOT NULL,
+  `NUM_DISTINCTS` bigint(20),
+  `BIT_VECTOR` blob,
+  `AVG_COL_LEN` double(53,4),
+  `MAX_COL_LEN` bigint(20),
+  `NUM_TRUES` bigint(20),
+  `NUM_FALSES` bigint(20),
+  `LAST_ANALYZED` bigint(20) NOT NULL,
+   PRIMARY KEY (`CS_ID`),
+   CONSTRAINT `TAB_COL_STATS_FK` FOREIGN KEY (`TBL_ID`) REFERENCES `TBLS` (`TBL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX TAB_COL_STATS_IDX ON TAB_COL_STATS (CAT_NAME, DB_NAME, TABLE_NAME, COLUMN_NAME) USING BTREE;
+ --
+ -- Table structure for table `PART_COL_STATS`
+ --
+ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` (
+  `CS_ID` bigint(20) NOT NULL,
+  `CAT_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `DB_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `TABLE_NAME` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PARTITION_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_NAME` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `COLUMN_TYPE` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+  `PART_ID` bigint(20) NOT NULL,
+  `LONG_LOW_VALUE` bigint(20),
+  `LONG_HIGH_VALUE` bigint(20),
+  `DOUBLE_HIGH_VALUE` double(53,4),
+  `DOUBLE_LOW_VALUE` double(53,4),
+  `BIG_DECIMAL_LOW_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `BIG_DECIMAL_HIGH_VALUE` varchar(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+  `NUM_NULLS` bigint(20) NOT NULL,
+  `NUM_DISTINCTS` bigint(20),
+  `BIT_VECTOR` blob,
+  `AVG_COL_LEN` double(53,4),
+  `MAX_COL_LEN` bigint(20),
+  `NUM_TRUES` bigint(20),
+  `NUM_FALSES` bigint(20),
+  `LAST_ANALYZED` bigint(20) NOT NULL,
+   PRIMARY KEY (`CS_ID`),
+   CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (CAT_NAME, DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE;
+ 
+ --
+ -- Table structure for table `TYPES`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TYPES` (
+   `TYPES_ID` bigint(20) NOT NULL,
+   `TYPE_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TYPE1` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `TYPE2` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   PRIMARY KEY (`TYPES_ID`),
+   UNIQUE KEY `UNIQUE_TYPE` (`TYPE_NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ 
+ --
+ -- Table structure for table `TYPE_FIELDS`
+ --
+ 
+ /*!40101 SET @saved_cs_client     = @@character_set_client */;
+ /*!40101 SET character_set_client = utf8 */;
+ CREATE TABLE IF NOT EXISTS `TYPE_FIELDS` (
+   `TYPE_NAME` bigint(20) NOT NULL,
+   `COMMENT` varchar(256) CHARACTER SET latin1 COLLATE latin1_bin DEFAULT NULL,
+   `FIELD_NAME` varchar(128) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `FIELD_TYPE` varchar(767) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL,
+   `INTEGER_IDX` int(11) NOT NULL,
+   PRIMARY KEY (`TYPE_NAME`,`FIELD_NAME`),
+   KEY `TYPE_FIELDS_N49` (`TYPE_NAME`),
+   CONSTRAINT `TYPE_FIELDS_FK1` FOREIGN KEY (`TYPE_NAME`) REFERENCES `TYPES` (`TYPES_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ -- Table `MASTER_KEYS` for classes [org.apache.hadoop.hive.metastore.model.MMasterKey]
+ CREATE TABLE IF NOT EXISTS `MASTER_KEYS`
+ (
+     `KEY_ID` INTEGER NOT NULL AUTO_INCREMENT,
+     `MASTER_KEY` VARCHAR(767) BINARY NULL,
+     PRIMARY KEY (`KEY_ID`)
+ ) ENGINE=INNODB DEFAULT CHARSET=latin1;
+ 
+ -- Table `DELEGATION_TOKENS` for classes [org.apache.hadoop.hive.metastore.model.MDelegationToken]
+ CREATE TABLE IF NOT EXISTS `DELEGATION_TOKENS`
+ (
+     `TOKEN_IDENT` VARCHAR(767) BINARY NOT NULL,
+     `TOKEN` VARCHAR(767) BINARY NULL,
+     PRIMARY KEY (`TOKEN_IDENT`)
+ ) ENGINE=INNODB DEFAULT CHARSET=latin1;
+ 
+ --
+ -- Table structure for VERSION
+ --
+ CREATE TABLE IF NOT EXISTS `VERSION` (
+   `VER_ID` BIGINT NOT NULL,
+   `SCHEMA_VERSION` VARCHAR(127) NOT NULL,
+   `VERSION_COMMENT` VARCHAR(255),
+   PRIMARY KEY (`VER_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ --
+ -- Table structure for table FUNCS
+ --
+ CREATE TABLE IF NOT EXISTS `FUNCS` (
+   `FUNC_ID` BIGINT(20) NOT NULL,
+   `CLASS_NAME` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+   `CREATE_TIME` INT(11) NOT NULL,
+   `DB_ID` BIGINT(20),
+   `FUNC_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+   `FUNC_TYPE` INT(11) NOT NULL,
+   `OWNER_NAME` VARCHAR(128) CHARACTER SET latin1 COLLATE latin1_bin,
+   `OWNER_TYPE` VARCHAR(10) CHARACTER SET latin1 COLLATE latin1_bin,
+   PRIMARY KEY (`FUNC_ID`),
+   UNIQUE KEY `UNIQUEFUNCTION` (`FUNC_NAME`, `DB_ID`),
+   KEY `FUNCS_N49` (`DB_ID`),
+   CONSTRAINT `FUNCS_FK1` FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ --
+ -- Table structure for table FUNC_RU
+ --
+ CREATE TABLE IF NOT EXISTS `FUNC_RU` (
+   `FUNC_ID` BIGINT(20) NOT NULL,
+   `RESOURCE_TYPE` INT(11) NOT NULL,
+   `RESOURCE_URI` VARCHAR(4000) CHARACTER SET latin1 COLLATE latin1_bin,
+   `INTEGER_IDX` INT(11) NOT NULL,
+   PRIMARY KEY (`FUNC_ID`, `INTEGER_IDX`),
+   CONSTRAINT `FUNC_RU_FK1` FOREIGN KEY (`FUNC_ID`) REFERENCES `FUNCS` (`FUNC_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS `NOTIFICATION_LOG`
+ (
+     `NL_ID` BIGINT(20) NOT NULL,
+     `EVENT_ID` BIGINT(20) NOT NULL,
+     `EVENT_TIME` INT(11) NOT NULL,
+     `EVENT_TYPE` varchar(32) NOT NULL,
+     `CAT_NAME` varchar(256),
+     `DB_NAME` varchar(128),
+     `TBL_NAME` varchar(256),
+     `MESSAGE` longtext,
+     `MESSAGE_FORMAT` varchar(16),
+     PRIMARY KEY (`NL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE`
+ (
+     `NNI_ID` BIGINT(20) NOT NULL,
+     `NEXT_EVENT_ID` BIGINT(20) NOT NULL,
+     PRIMARY KEY (`NNI_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
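+ -- Seed the sequence with event id 1, but only when the table is empty, so
+ -- re-running this init script stays idempotent.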
+ INSERT INTO `NOTIFICATION_SEQUENCE` (`NNI_ID`, `NEXT_EVENT_ID`) SELECT * FROM (SELECT 1 AS `NNI_ID`, 1 AS `NEXT_EVENT_ID`) a WHERE (SELECT COUNT(*) FROM `NOTIFICATION_SEQUENCE`) = 0;
+ 
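+ -- KEY_CONSTRAINTS stores every declared table constraint (primary key, foreign
+ -- key, unique, not-null, default, check); CONSTRAINT_TYPE encodes the kind as
+ -- a small integer.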
+ CREATE TABLE IF NOT EXISTS `KEY_CONSTRAINTS`
+ (
+   `CHILD_CD_ID` BIGINT,
+   `CHILD_INTEGER_IDX` INT(11),
+   `CHILD_TBL_ID` BIGINT,
+   `PARENT_CD_ID` BIGINT,
+   `PARENT_INTEGER_IDX` INT(11) NOT NULL,
+   `PARENT_TBL_ID` BIGINT NOT NULL,
+   `POSITION` BIGINT NOT NULL,
+   `CONSTRAINT_NAME` VARCHAR(400) NOT NULL,
+   `CONSTRAINT_TYPE` SMALLINT(6) NOT NULL,
+   `UPDATE_RULE` SMALLINT(6),
+   `DELETE_RULE` SMALLINT(6),
+   `ENABLE_VALIDATE_RELY` SMALLINT(6) NOT NULL,
+   `DEFAULT_VALUE` VARCHAR(400),
+   PRIMARY KEY (`CONSTRAINT_NAME`, `POSITION`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX `CONSTRAINTS_PARENT_TABLE_ID_INDEX` ON KEY_CONSTRAINTS (`PARENT_TBL_ID`) USING BTREE;
+ 
+ CREATE INDEX `CONSTRAINTS_CONSTRAINT_TYPE_INDEX` ON KEY_CONSTRAINTS (`CONSTRAINT_TYPE`) USING BTREE;
+ 
+ -- -----------------------------
+ -- Metastore DB Properties table
+ -- -----------------------------
+ CREATE TABLE IF NOT EXISTS `METASTORE_DB_PROPERTIES` (
+   `PROPERTY_KEY` varchar(255) NOT NULL,
+   `PROPERTY_VALUE` varchar(1000) NOT NULL,
+   `DESCRIPTION` varchar(1000),
+  PRIMARY KEY(`PROPERTY_KEY`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ 
+ -- ---------------------
+ -- Resource plan tables.
+ -- ---------------------
+ CREATE TABLE IF NOT EXISTS WM_RESOURCEPLAN (
+     `RP_ID` bigint(20) NOT NULL,
+     `NAME` varchar(128) NOT NULL,
+     `QUERY_PARALLELISM` int(11),
+     `STATUS` varchar(20) NOT NULL,
+     `DEFAULT_POOL_ID` bigint(20),
+     PRIMARY KEY (`RP_ID`),
+     UNIQUE KEY `UNIQUE_WM_RESOURCEPLAN` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS WM_POOL
+ (
+     `POOL_ID` bigint(20) NOT NULL,
+     `RP_ID` bigint(20) NOT NULL,
+     `PATH` varchar(767) NOT NULL,
+     `ALLOC_FRACTION` DOUBLE,
+     `QUERY_PARALLELISM` int(11),
+     `SCHEDULING_POLICY` varchar(767),
+     PRIMARY KEY (`POOL_ID`),
+     UNIQUE KEY `UNIQUE_WM_POOL` (`RP_ID`, `PATH`),
+     CONSTRAINT `WM_POOL_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ ALTER TABLE `WM_RESOURCEPLAN` ADD CONSTRAINT `WM_RESOURCEPLAN_FK1` FOREIGN KEY (`DEFAULT_POOL_ID`) REFERENCES `WM_POOL`(`POOL_ID`);
+ 
+ CREATE TABLE IF NOT EXISTS WM_TRIGGER
+ (
+     `TRIGGER_ID` bigint(20) NOT NULL,
+     `RP_ID` bigint(20) NOT NULL,
+     `NAME` varchar(128) NOT NULL,
+     `TRIGGER_EXPRESSION` varchar(1024),
+     `ACTION_EXPRESSION` varchar(1024),
+     `IS_IN_UNMANAGED` bit(1) NOT NULL DEFAULT 0,
+     PRIMARY KEY (`TRIGGER_ID`),
+     UNIQUE KEY `UNIQUE_WM_TRIGGER` (`RP_ID`, `NAME`),
+     CONSTRAINT `WM_TRIGGER_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS WM_POOL_TO_TRIGGER
+ (
+     `POOL_ID` bigint(20) NOT NULL,
+     `TRIGGER_ID` bigint(20) NOT NULL,
+     PRIMARY KEY (`POOL_ID`, `TRIGGER_ID`),
+     CONSTRAINT `WM_POOL_TO_TRIGGER_FK1` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`),
+     CONSTRAINT `WM_POOL_TO_TRIGGER_FK2` FOREIGN KEY (`TRIGGER_ID`) REFERENCES `WM_TRIGGER` (`TRIGGER_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE IF NOT EXISTS WM_MAPPING
+ (
+     `MAPPING_ID` bigint(20) NOT NULL,
+     `RP_ID` bigint(20) NOT NULL,
+     `ENTITY_TYPE` varchar(128) NOT NULL,
+     `ENTITY_NAME` varchar(128) NOT NULL,
+     `POOL_ID` bigint(20),
+     `ORDERING` int,
+     PRIMARY KEY (`MAPPING_ID`),
+     UNIQUE KEY `UNIQUE_WM_MAPPING` (`RP_ID`, `ENTITY_TYPE`, `ENTITY_NAME`),
+     CONSTRAINT `WM_MAPPING_FK1` FOREIGN KEY (`RP_ID`) REFERENCES `WM_RESOURCEPLAN` (`RP_ID`),
+     CONSTRAINT `WM_MAPPING_FK2` FOREIGN KEY (`POOL_ID`) REFERENCES `WM_POOL` (`POOL_ID`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ -- ----------------------------
+ -- Transaction and Lock Tables
+ -- ----------------------------
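+ -- TXNS holds one row per transaction; TXN_STATE is a one-character code
+ -- ('o' = open, 'a' = aborted, 'c' = committed).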
+ CREATE TABLE TXNS (
+   TXN_ID bigint PRIMARY KEY,
+   TXN_STATE char(1) NOT NULL,
+   TXN_STARTED bigint NOT NULL,
+   TXN_LAST_HEARTBEAT bigint NOT NULL,
+   TXN_USER varchar(128) NOT NULL,
+   TXN_HOST varchar(128) NOT NULL,
+   TXN_AGENT_INFO varchar(128),
+   TXN_META_INFO varchar(128),
+   TXN_HEARTBEAT_COUNT int,
+   TXN_TYPE int
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE TXN_COMPONENTS (
+   TC_TXNID bigint NOT NULL,
+   TC_DATABASE varchar(128) NOT NULL,
+   TC_TABLE varchar(128),
+   TC_PARTITION varchar(767),
+   TC_OPERATION_TYPE char(1) NOT NULL,
+   TC_WRITEID bigint,
+   FOREIGN KEY (TC_TXNID) REFERENCES TXNS (TXN_ID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX TC_TXNID_INDEX ON TXN_COMPONENTS (TC_TXNID);
+ 
+ CREATE TABLE COMPLETED_TXN_COMPONENTS (
+   CTC_TXNID bigint NOT NULL,
+   CTC_DATABASE varchar(128) NOT NULL,
+   CTC_TABLE varchar(256),
+   CTC_PARTITION varchar(767),
+   CTC_TIMESTAMP timestamp DEFAULT CURRENT_TIMESTAMP NOT NULL,
+   CTC_WRITEID bigint
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX COMPLETED_TXN_COMPONENTS_IDX ON COMPLETED_TXN_COMPONENTS (CTC_DATABASE, CTC_TABLE, CTC_PARTITION) USING BTREE;
+ 
+ CREATE TABLE NEXT_TXN_ID (
+   NTXN_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ INSERT INTO NEXT_TXN_ID VALUES(1);
+ 
+ CREATE TABLE HIVE_LOCKS (
+   HL_LOCK_EXT_ID bigint NOT NULL,
+   HL_LOCK_INT_ID bigint NOT NULL,
+   HL_TXNID bigint NOT NULL,
+   HL_DB varchar(128) NOT NULL,
+   HL_TABLE varchar(128),
+   HL_PARTITION varchar(767),
+   HL_LOCK_STATE char(1) NOT NULL,
+   HL_LOCK_TYPE char(1) NOT NULL,
+   HL_LAST_HEARTBEAT bigint NOT NULL,
+   HL_ACQUIRED_AT bigint,
+   HL_USER varchar(128) NOT NULL,
+   HL_HOST varchar(128) NOT NULL,
+   HL_HEARTBEAT_COUNT int,
+   HL_AGENT_INFO varchar(128),
+   HL_BLOCKEDBY_EXT_ID bigint,
+   HL_BLOCKEDBY_INT_ID bigint,
+   PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID),
+   KEY HIVE_LOCK_TXNID_INDEX (HL_TXNID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX HL_TXNID_IDX ON HIVE_LOCKS (HL_TXNID);
+ 
+ CREATE TABLE NEXT_LOCK_ID (
+   NL_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ INSERT INTO NEXT_LOCK_ID VALUES(1);
+ 
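+ -- COMPACTION_QUEUE tracks requested and in-flight compactions; CQ_STATE and
+ -- CQ_TYPE are one-character codes for the lifecycle state and minor/major type.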
+ CREATE TABLE COMPACTION_QUEUE (
+   CQ_ID bigint PRIMARY KEY,
+   CQ_DATABASE varchar(128) NOT NULL,
+   CQ_TABLE varchar(128) NOT NULL,
+   CQ_PARTITION varchar(767),
+   CQ_STATE char(1) NOT NULL,
+   CQ_TYPE char(1) NOT NULL,
+   CQ_TBLPROPERTIES varchar(2048),
+   CQ_WORKER_ID varchar(128),
+   CQ_START bigint,
+   CQ_RUN_AS varchar(128),
+   CQ_HIGHEST_WRITE_ID bigint,
+   CQ_META_INFO varbinary(2048),
+   CQ_HADOOP_JOB_ID varchar(32)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE COMPLETED_COMPACTIONS (
+   CC_ID bigint PRIMARY KEY,
+   CC_DATABASE varchar(128) NOT NULL,
+   CC_TABLE varchar(128) NOT NULL,
+   CC_PARTITION varchar(767),
+   CC_STATE char(1) NOT NULL,
+   CC_TYPE char(1) NOT NULL,
+   CC_TBLPROPERTIES varchar(2048),
+   CC_WORKER_ID varchar(128),
+   CC_START bigint,
+   CC_END bigint,
+   CC_RUN_AS varchar(128),
+   CC_HIGHEST_WRITE_ID bigint,
+   CC_META_INFO varbinary(2048),
+   CC_HADOOP_JOB_ID varchar(32)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE NEXT_COMPACTION_QUEUE_ID (
+   NCQ_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1);
+ 
+ CREATE TABLE AUX_TABLE (
+   MT_KEY1 varchar(128) NOT NULL,
+   MT_KEY2 bigint NOT NULL,
+   MT_COMMENT varchar(255),
+   PRIMARY KEY(MT_KEY1, MT_KEY2)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
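+ -- WRITE_SET records what committed transactions wrote so that concurrent
+ -- writers can be checked for write-write conflicts at commit time.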
+ CREATE TABLE WRITE_SET (
+   WS_DATABASE varchar(128) NOT NULL,
+   WS_TABLE varchar(128) NOT NULL,
+   WS_PARTITION varchar(767),
+   WS_TXNID bigint NOT NULL,
+   WS_COMMIT_ID bigint NOT NULL,
+   WS_OPERATION_TYPE char(1) NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
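+ -- TXN_TO_WRITE_ID maps a global transaction id to the per-table write id it
+ -- was allocated (ACID v2 names delta directories by table-level write ids).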
+ CREATE TABLE TXN_TO_WRITE_ID (
+   T2W_TXNID bigint NOT NULL,
+   T2W_DATABASE varchar(128) NOT NULL,
+   T2W_TABLE varchar(256) NOT NULL,
+   T2W_WRITEID bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE UNIQUE INDEX TBL_TO_TXN_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_TXNID);
+ CREATE UNIQUE INDEX TBL_TO_WRITE_ID_IDX ON TXN_TO_WRITE_ID (T2W_DATABASE, T2W_TABLE, T2W_WRITEID);
+ 
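+ -- NEXT_WRITE_ID tracks, per database/table, the next write id to allocate.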
+ CREATE TABLE NEXT_WRITE_ID (
+   NWI_DATABASE varchar(128) NOT NULL,
+   NWI_TABLE varchar(256) NOT NULL,
+   NWI_NEXT bigint NOT NULL
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE UNIQUE INDEX NEXT_WRITE_ID_IDX ON NEXT_WRITE_ID (NWI_DATABASE, NWI_TABLE);
+ 
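+ -- MIN_HISTORY_LEVEL records, for each open transaction, the minimum open txn
+ -- id at the time it started; the minimum across all rows gives the oldest txn
+ -- that might still read old data, which gates cleanup.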
+ CREATE TABLE MIN_HISTORY_LEVEL (
+   MHL_TXNID bigint NOT NULL,
+   MHL_MIN_OPEN_TXNID bigint NOT NULL,
+   PRIMARY KEY(MHL_TXNID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX MIN_HISTORY_LEVEL_IDX ON MIN_HISTORY_LEVEL (MHL_MIN_OPEN_TXNID);
+ 
+ CREATE TABLE `I_SCHEMA` (
+   `SCHEMA_ID` BIGINT PRIMARY KEY,
+   `SCHEMA_TYPE` INTEGER NOT NULL,
+   `NAME` VARCHAR(256),
+   `DB_ID` BIGINT,
+   `COMPATIBILITY` INTEGER NOT NULL,
+   `VALIDATION_LEVEL` INTEGER NOT NULL,
+   `CAN_EVOLVE` bit(1) NOT NULL,
+   `SCHEMA_GROUP` VARCHAR(256),
+   `DESCRIPTION` VARCHAR(4000),
+   FOREIGN KEY (`DB_ID`) REFERENCES `DBS` (`DB_ID`),
+   KEY `UNIQUE_NAME` (`NAME`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE `SCHEMA_VERSION` (
+   `SCHEMA_VERSION_ID` bigint primary key,
+   `SCHEMA_ID` BIGINT,
+   `VERSION` INTEGER NOT NULL,
+   `CREATED_AT` BIGINT NOT NULL,
+   `CD_ID` BIGINT, 
+   `STATE` INTEGER NOT NULL,
+   `DESCRIPTION` VARCHAR(4000),
+   `SCHEMA_TEXT` mediumtext,
+   `FINGERPRINT` VARCHAR(256),
+   `SCHEMA_VERSION_NAME` VARCHAR(256),
+   `SERDE_ID` bigint, 
+   FOREIGN KEY (`SCHEMA_ID`) REFERENCES `I_SCHEMA` (`SCHEMA_ID`),
+   FOREIGN KEY (`CD_ID`) REFERENCES `CDS` (`CD_ID`),
+   FOREIGN KEY (`SERDE_ID`) REFERENCES `SERDES` (`SERDE_ID`),
+   KEY `UNIQUE_VERSION` (`SCHEMA_ID`, `VERSION`)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE TABLE REPL_TXN_MAP (
+   RTM_REPL_POLICY varchar(256) NOT NULL,
+   RTM_SRC_TXN_ID bigint NOT NULL,
+   RTM_TARGET_TXN_ID bigint NOT NULL,
+   PRIMARY KEY (RTM_REPL_POLICY, RTM_SRC_TXN_ID)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ 
+ CREATE TABLE RUNTIME_STATS (
+   RS_ID bigint primary key,
+   CREATE_TIME bigint NOT NULL,
+   WEIGHT bigint NOT NULL,
+   PAYLOAD blob
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ CREATE INDEX IDX_RUNTIME_STATS_CREATE_TIME ON RUNTIME_STATS(CREATE_TIME);
+ 
+ CREATE TABLE TXN_WRITE_NOTIFICATION_LOG (
+   WNL_ID bigint NOT NULL,
+   WNL_TXNID bigint NOT NULL,
+   WNL_WRITEID bigint NOT NULL,
+   WNL_DATABASE varchar(128) NOT NULL,
+   WNL_TABLE varchar(128) NOT NULL,
+   WNL_PARTITION varchar(1024) NOT NULL,
+   WNL_TABLE_OBJ longtext NOT NULL,
+   WNL_PARTITION_OBJ longtext,
+   WNL_FILES longtext,
+   WNL_EVENT_TIME INT(11) NOT NULL,
+   PRIMARY KEY (WNL_TXNID, WNL_DATABASE, WNL_TABLE, WNL_PARTITION)
+ ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+ 
+ INSERT INTO `SEQUENCE_TABLE` (`SEQUENCE_NAME`, `NEXT_VAL`) VALUES ('org.apache.hadoop.hive.metastore.model.MTxnWriteNotificationLog', 1);
+ 
+ -- -----------------------------------------------------------------
+ -- Record schema version. Should be the last step in the init script
+ -- -----------------------------------------------------------------
+ INSERT INTO VERSION (VER_ID, SCHEMA_VERSION, VERSION_COMMENT) VALUES (1, '4.0.0', 'Hive release version 4.0.0');
+ 
+ /*!40101 SET character_set_client = @saved_cs_client */;
+ /*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
+ 
+ /*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+ /*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+ /*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+ /*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+ /*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+ /*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+ /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+ 
+ -- Dump completed on 2012-08-23  0:56:31

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
index 0000000,b3789f9..89265ad
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
+++ b/standalone-metastore/metastore-common/src/main/sql/mysql/upgrade-3.1.0-to-4.0.0.mysql.sql
@@@ -1,0 -1,6 +1,10 @@@
+ SELECT 'Upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
+ 
++-- HIVE-19416
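++-- Add the write id columns that back transactional (per-write-id) table and
++-- partition statistics.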
++ALTER TABLE TBLS ADD WRITE_ID bigint;
++ALTER TABLE PARTITIONS ADD WRITE_ID bigint;
++
+ -- These lines need to be last.  Insert any changes above.
+ UPDATE VERSION SET SCHEMA_VERSION='4.0.0', VERSION_COMMENT='Hive release version 4.0.0' where VER_ID=1;
+ SELECT 'Finished upgrading MetaStore schema from 3.1.0 to 4.0.0' AS ' ';
+ 


[80/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
index 0000000,93b5780..b6b3bfa
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-php/metastore/Types.php
@@@ -1,0 -1,32146 +1,33017 @@@
+ <?php
+ namespace metastore;
+ 
+ /**
+  * Autogenerated by Thrift Compiler (0.9.3)
+  *
+  * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+  *  @generated
+  */
+ use Thrift\Base\TBase;
+ use Thrift\Type\TType;
+ use Thrift\Type\TMessageType;
+ use Thrift\Exception\TException;
+ use Thrift\Exception\TProtocolException;
+ use Thrift\Protocol\TProtocol;
+ use Thrift\Protocol\TBinaryProtocolAccelerated;
+ use Thrift\Exception\TApplicationException;
+ 
+ 
+ final class HiveObjectType {
+   const GLOBAL = 1;
+   const DATABASE = 2;
+   const TABLE = 3;
+   const PARTITION = 4;
+   const COLUMN = 5;
+   static public $__names = array(
+     1 => 'GLOBAL',
+     2 => 'DATABASE',
+     3 => 'TABLE',
+     4 => 'PARTITION',
+     5 => 'COLUMN',
+   );
+ }
+ 
+ final class PrincipalType {
+   const USER = 1;
+   const ROLE = 2;
+   const GROUP = 3;
+   static public $__names = array(
+     1 => 'USER',
+     2 => 'ROLE',
+     3 => 'GROUP',
+   );
+ }
+ 
+ final class PartitionEventType {
+   const LOAD_DONE = 1;
+   static public $__names = array(
+     1 => 'LOAD_DONE',
+   );
+ }
+ 
+ final class TxnState {
+   const COMMITTED = 1;
+   const ABORTED = 2;
+   const OPEN = 3;
+   static public $__names = array(
+     1 => 'COMMITTED',
+     2 => 'ABORTED',
+     3 => 'OPEN',
+   );
+ }
+ 
+ final class LockLevel {
+   const DB = 1;
+   const TABLE = 2;
+   const PARTITION = 3;
+   static public $__names = array(
+     1 => 'DB',
+     2 => 'TABLE',
+     3 => 'PARTITION',
+   );
+ }
+ 
+ final class LockState {
+   const ACQUIRED = 1;
+   const WAITING = 2;
+   const ABORT = 3;
+   const NOT_ACQUIRED = 4;
+   static public $__names = array(
+     1 => 'ACQUIRED',
+     2 => 'WAITING',
+     3 => 'ABORT',
+     4 => 'NOT_ACQUIRED',
+   );
+ }
+ 
+ final class LockType {
+   const SHARED_READ = 1;
+   const SHARED_WRITE = 2;
+   const EXCLUSIVE = 3;
+   static public $__names = array(
+     1 => 'SHARED_READ',
+     2 => 'SHARED_WRITE',
+     3 => 'EXCLUSIVE',
+   );
+ }
+ 
+ final class CompactionType {
+   const MINOR = 1;
+   const MAJOR = 2;
+   static public $__names = array(
+     1 => 'MINOR',
+     2 => 'MAJOR',
+   );
+ }
+ 
+ final class GrantRevokeType {
+   const GRANT = 1;
+   const REVOKE = 2;
+   static public $__names = array(
+     1 => 'GRANT',
+     2 => 'REVOKE',
+   );
+ }
+ 
+ final class DataOperationType {
+   const SELECT = 1;
+   const INSERT = 2;
+   const UPDATE = 3;
+   const DELETE = 4;
+   const UNSET = 5;
+   const NO_TXN = 6;
+   static public $__names = array(
+     1 => 'SELECT',
+     2 => 'INSERT',
+     3 => 'UPDATE',
+     4 => 'DELETE',
+     5 => 'UNSET',
+     6 => 'NO_TXN',
+   );
+ }
+ 
+ final class EventRequestType {
+   const INSERT = 1;
+   const UPDATE = 2;
+   const DELETE = 3;
+   static public $__names = array(
+     1 => 'INSERT',
+     2 => 'UPDATE',
+     3 => 'DELETE',
+   );
+ }
+ 
+ final class SerdeType {
+   const HIVE = 1;
+   const SCHEMA_REGISTRY = 2;
+   static public $__names = array(
+     1 => 'HIVE',
+     2 => 'SCHEMA_REGISTRY',
+   );
+ }
+ 
+ final class SchemaType {
+   const HIVE = 1;
+   const AVRO = 2;
+   static public $__names = array(
+     1 => 'HIVE',
+     2 => 'AVRO',
+   );
+ }
+ 
+ final class SchemaCompatibility {
+   const NONE = 1;
+   const BACKWARD = 2;
+   const FORWARD = 3;
+   const BOTH = 4;
+   static public $__names = array(
+     1 => 'NONE',
+     2 => 'BACKWARD',
+     3 => 'FORWARD',
+     4 => 'BOTH',
+   );
+ }
+ 
+ final class SchemaValidation {
+   const LATEST = 1;
+   const ALL = 2;
+   static public $__names = array(
+     1 => 'LATEST',
+     2 => 'ALL',
+   );
+ }
+ 
+ final class SchemaVersionState {
+   const INITIATED = 1;
+   const START_REVIEW = 2;
+   const CHANGES_REQUIRED = 3;
+   const REVIEWED = 4;
+   const ENABLED = 5;
+   const DISABLED = 6;
+   const ARCHIVED = 7;
+   const DELETED = 8;
+   static public $__names = array(
+     1 => 'INITIATED',
+     2 => 'START_REVIEW',
+     3 => 'CHANGES_REQUIRED',
+     4 => 'REVIEWED',
+     5 => 'ENABLED',
+     6 => 'DISABLED',
+     7 => 'ARCHIVED',
+     8 => 'DELETED',
+   );
+ }
+ 
+ final class FunctionType {
+   const JAVA = 1;
+   static public $__names = array(
+     1 => 'JAVA',
+   );
+ }
+ 
+ final class ResourceType {
+   const JAR = 1;
+   const FILE = 2;
+   const ARCHIVE = 3;
+   static public $__names = array(
+     1 => 'JAR',
+     2 => 'FILE',
+     3 => 'ARCHIVE',
+   );
+ }
+ 
+ final class FileMetadataExprType {
+   const ORC_SARG = 1;
+   static public $__names = array(
+     1 => 'ORC_SARG',
+   );
+ }
+ 
+ final class ClientCapability {
+   const TEST_CAPABILITY = 1;
+   const INSERT_ONLY_TABLES = 2;
+   static public $__names = array(
+     1 => 'TEST_CAPABILITY',
+     2 => 'INSERT_ONLY_TABLES',
+   );
+ }
+ 
+ final class WMResourcePlanStatus {
+   const ACTIVE = 1;
+   const ENABLED = 2;
+   const DISABLED = 3;
+   static public $__names = array(
+     1 => 'ACTIVE',
+     2 => 'ENABLED',
+     3 => 'DISABLED',
+   );
+ }
+ 
+ final class WMPoolSchedulingPolicy {
+   const FAIR = 1;
+   const FIFO = 2;
+   static public $__names = array(
+     1 => 'FAIR',
+     2 => 'FIFO',
+   );
+ }
+ 
+ class Version {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $version = null;
+   /**
+    * @var string
+    */
+   public $comments = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'version',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'comments',
+           'type' => TType::STRING,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['version'])) {
+         $this->version = $vals['version'];
+       }
+       if (isset($vals['comments'])) {
+         $this->comments = $vals['comments'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'Version';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->version);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->comments);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('Version');
+     if ($this->version !== null) {
+       $xfer += $output->writeFieldBegin('version', TType::STRING, 1);
+       $xfer += $output->writeString($this->version);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->comments !== null) {
+       $xfer += $output->writeFieldBegin('comments', TType::STRING, 2);
+       $xfer += $output->writeString($this->comments);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
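+ // Usage sketch (editor's illustration, not part of the generated file): these
+ // structs are plain Thrift beans. A client can build one from an assoc array
+ // and serialize it against any TProtocol instance, e.g.:
+ //
+ //   $v = new Version(array('version' => '4.0.0', 'comments' => 'release'));
+ //   $v->write($protocol);   // $protocol is an assumed TProtocol instance
+ 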
+ class FieldSchema {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $name = null;
+   /**
+    * @var string
+    */
+   public $type = null;
+   /**
+    * @var string
+    */
+   public $comment = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'name',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'type',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'comment',
+           'type' => TType::STRING,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['name'])) {
+         $this->name = $vals['name'];
+       }
+       if (isset($vals['type'])) {
+         $this->type = $vals['type'];
+       }
+       if (isset($vals['comment'])) {
+         $this->comment = $vals['comment'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'FieldSchema';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->type);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->comment);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('FieldSchema');
+     if ($this->name !== null) {
+       $xfer += $output->writeFieldBegin('name', TType::STRING, 1);
+       $xfer += $output->writeString($this->name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->type !== null) {
+       $xfer += $output->writeFieldBegin('type', TType::STRING, 2);
+       $xfer += $output->writeString($this->type);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->comment !== null) {
+       $xfer += $output->writeFieldBegin('comment', TType::STRING, 3);
+       $xfer += $output->writeString($this->comment);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class SQLPrimaryKey {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $table_db = null;
+   /**
+    * @var string
+    */
+   public $table_name = null;
+   /**
+    * @var string
+    */
+   public $column_name = null;
+   /**
+    * @var int
+    */
+   public $key_seq = null;
+   /**
+    * @var string
+    */
+   public $pk_name = null;
+   /**
+    * @var bool
+    */
+   public $enable_cstr = null;
+   /**
+    * @var bool
+    */
+   public $validate_cstr = null;
+   /**
+    * @var bool
+    */
+   public $rely_cstr = null;
+   /**
+    * @var string
+    */
+   public $catName = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'table_db',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'table_name',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'column_name',
+           'type' => TType::STRING,
+           ),
+         4 => array(
+           'var' => 'key_seq',
+           'type' => TType::I32,
+           ),
+         5 => array(
+           'var' => 'pk_name',
+           'type' => TType::STRING,
+           ),
+         6 => array(
+           'var' => 'enable_cstr',
+           'type' => TType::BOOL,
+           ),
+         7 => array(
+           'var' => 'validate_cstr',
+           'type' => TType::BOOL,
+           ),
+         8 => array(
+           'var' => 'rely_cstr',
+           'type' => TType::BOOL,
+           ),
+         9 => array(
+           'var' => 'catName',
+           'type' => TType::STRING,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['table_db'])) {
+         $this->table_db = $vals['table_db'];
+       }
+       if (isset($vals['table_name'])) {
+         $this->table_name = $vals['table_name'];
+       }
+       if (isset($vals['column_name'])) {
+         $this->column_name = $vals['column_name'];
+       }
+       if (isset($vals['key_seq'])) {
+         $this->key_seq = $vals['key_seq'];
+       }
+       if (isset($vals['pk_name'])) {
+         $this->pk_name = $vals['pk_name'];
+       }
+       if (isset($vals['enable_cstr'])) {
+         $this->enable_cstr = $vals['enable_cstr'];
+       }
+       if (isset($vals['validate_cstr'])) {
+         $this->validate_cstr = $vals['validate_cstr'];
+       }
+       if (isset($vals['rely_cstr'])) {
+         $this->rely_cstr = $vals['rely_cstr'];
+       }
+       if (isset($vals['catName'])) {
+         $this->catName = $vals['catName'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'SQLPrimaryKey';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_db);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->column_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::I32) {
+             $xfer += $input->readI32($this->key_seq);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 5:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->pk_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 6:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->enable_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 7:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->validate_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 8:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->rely_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 9:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->catName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('SQLPrimaryKey');
+     if ($this->table_db !== null) {
+       $xfer += $output->writeFieldBegin('table_db', TType::STRING, 1);
+       $xfer += $output->writeString($this->table_db);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->table_name !== null) {
+       $xfer += $output->writeFieldBegin('table_name', TType::STRING, 2);
+       $xfer += $output->writeString($this->table_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->column_name !== null) {
+       $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3);
+       $xfer += $output->writeString($this->column_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->key_seq !== null) {
+       $xfer += $output->writeFieldBegin('key_seq', TType::I32, 4);
+       $xfer += $output->writeI32($this->key_seq);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->pk_name !== null) {
+       $xfer += $output->writeFieldBegin('pk_name', TType::STRING, 5);
+       $xfer += $output->writeString($this->pk_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->enable_cstr !== null) {
+       $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6);
+       $xfer += $output->writeBool($this->enable_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->validate_cstr !== null) {
+       $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 7);
+       $xfer += $output->writeBool($this->validate_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->rely_cstr !== null) {
+       $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8);
+       $xfer += $output->writeBool($this->rely_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->catName !== null) {
+       $xfer += $output->writeFieldBegin('catName', TType::STRING, 9);
+       $xfer += $output->writeString($this->catName);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class SQLForeignKey {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $pktable_db = null;
+   /**
+    * @var string
+    */
+   public $pktable_name = null;
+   /**
+    * @var string
+    */
+   public $pkcolumn_name = null;
+   /**
+    * @var string
+    */
+   public $fktable_db = null;
+   /**
+    * @var string
+    */
+   public $fktable_name = null;
+   /**
+    * @var string
+    */
+   public $fkcolumn_name = null;
+   /**
+    * @var int
+    */
+   public $key_seq = null;
+   /**
+    * @var int
+    */
+   public $update_rule = null;
+   /**
+    * @var int
+    */
+   public $delete_rule = null;
+   /**
+    * @var string
+    */
+   public $fk_name = null;
+   /**
+    * @var string
+    */
+   public $pk_name = null;
+   /**
+    * @var bool
+    */
+   public $enable_cstr = null;
+   /**
+    * @var bool
+    */
+   public $validate_cstr = null;
+   /**
+    * @var bool
+    */
+   public $rely_cstr = null;
+   /**
+    * @var string
+    */
+   public $catName = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'pktable_db',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'pktable_name',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'pkcolumn_name',
+           'type' => TType::STRING,
+           ),
+         4 => array(
+           'var' => 'fktable_db',
+           'type' => TType::STRING,
+           ),
+         5 => array(
+           'var' => 'fktable_name',
+           'type' => TType::STRING,
+           ),
+         6 => array(
+           'var' => 'fkcolumn_name',
+           'type' => TType::STRING,
+           ),
+         7 => array(
+           'var' => 'key_seq',
+           'type' => TType::I32,
+           ),
+         8 => array(
+           'var' => 'update_rule',
+           'type' => TType::I32,
+           ),
+         9 => array(
+           'var' => 'delete_rule',
+           'type' => TType::I32,
+           ),
+         10 => array(
+           'var' => 'fk_name',
+           'type' => TType::STRING,
+           ),
+         11 => array(
+           'var' => 'pk_name',
+           'type' => TType::STRING,
+           ),
+         12 => array(
+           'var' => 'enable_cstr',
+           'type' => TType::BOOL,
+           ),
+         13 => array(
+           'var' => 'validate_cstr',
+           'type' => TType::BOOL,
+           ),
+         14 => array(
+           'var' => 'rely_cstr',
+           'type' => TType::BOOL,
+           ),
+         15 => array(
+           'var' => 'catName',
+           'type' => TType::STRING,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['pktable_db'])) {
+         $this->pktable_db = $vals['pktable_db'];
+       }
+       if (isset($vals['pktable_name'])) {
+         $this->pktable_name = $vals['pktable_name'];
+       }
+       if (isset($vals['pkcolumn_name'])) {
+         $this->pkcolumn_name = $vals['pkcolumn_name'];
+       }
+       if (isset($vals['fktable_db'])) {
+         $this->fktable_db = $vals['fktable_db'];
+       }
+       if (isset($vals['fktable_name'])) {
+         $this->fktable_name = $vals['fktable_name'];
+       }
+       if (isset($vals['fkcolumn_name'])) {
+         $this->fkcolumn_name = $vals['fkcolumn_name'];
+       }
+       if (isset($vals['key_seq'])) {
+         $this->key_seq = $vals['key_seq'];
+       }
+       if (isset($vals['update_rule'])) {
+         $this->update_rule = $vals['update_rule'];
+       }
+       if (isset($vals['delete_rule'])) {
+         $this->delete_rule = $vals['delete_rule'];
+       }
+       if (isset($vals['fk_name'])) {
+         $this->fk_name = $vals['fk_name'];
+       }
+       if (isset($vals['pk_name'])) {
+         $this->pk_name = $vals['pk_name'];
+       }
+       if (isset($vals['enable_cstr'])) {
+         $this->enable_cstr = $vals['enable_cstr'];
+       }
+       if (isset($vals['validate_cstr'])) {
+         $this->validate_cstr = $vals['validate_cstr'];
+       }
+       if (isset($vals['rely_cstr'])) {
+         $this->rely_cstr = $vals['rely_cstr'];
+       }
+       if (isset($vals['catName'])) {
+         $this->catName = $vals['catName'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'SQLForeignKey';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->pktable_db);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->pktable_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->pkcolumn_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->fktable_db);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 5:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->fktable_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 6:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->fkcolumn_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 7:
+           if ($ftype == TType::I32) {
+             $xfer += $input->readI32($this->key_seq);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 8:
+           if ($ftype == TType::I32) {
+             $xfer += $input->readI32($this->update_rule);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 9:
+           if ($ftype == TType::I32) {
+             $xfer += $input->readI32($this->delete_rule);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 10:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->fk_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 11:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->pk_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 12:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->enable_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 13:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->validate_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 14:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->rely_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 15:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->catName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('SQLForeignKey');
+     if ($this->pktable_db !== null) {
+       $xfer += $output->writeFieldBegin('pktable_db', TType::STRING, 1);
+       $xfer += $output->writeString($this->pktable_db);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->pktable_name !== null) {
+       $xfer += $output->writeFieldBegin('pktable_name', TType::STRING, 2);
+       $xfer += $output->writeString($this->pktable_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->pkcolumn_name !== null) {
+       $xfer += $output->writeFieldBegin('pkcolumn_name', TType::STRING, 3);
+       $xfer += $output->writeString($this->pkcolumn_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->fktable_db !== null) {
+       $xfer += $output->writeFieldBegin('fktable_db', TType::STRING, 4);
+       $xfer += $output->writeString($this->fktable_db);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->fktable_name !== null) {
+       $xfer += $output->writeFieldBegin('fktable_name', TType::STRING, 5);
+       $xfer += $output->writeString($this->fktable_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->fkcolumn_name !== null) {
+       $xfer += $output->writeFieldBegin('fkcolumn_name', TType::STRING, 6);
+       $xfer += $output->writeString($this->fkcolumn_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->key_seq !== null) {
+       $xfer += $output->writeFieldBegin('key_seq', TType::I32, 7);
+       $xfer += $output->writeI32($this->key_seq);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->update_rule !== null) {
+       $xfer += $output->writeFieldBegin('update_rule', TType::I32, 8);
+       $xfer += $output->writeI32($this->update_rule);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->delete_rule !== null) {
+       $xfer += $output->writeFieldBegin('delete_rule', TType::I32, 9);
+       $xfer += $output->writeI32($this->delete_rule);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->fk_name !== null) {
+       $xfer += $output->writeFieldBegin('fk_name', TType::STRING, 10);
+       $xfer += $output->writeString($this->fk_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->pk_name !== null) {
+       $xfer += $output->writeFieldBegin('pk_name', TType::STRING, 11);
+       $xfer += $output->writeString($this->pk_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->enable_cstr !== null) {
+       $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 12);
+       $xfer += $output->writeBool($this->enable_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->validate_cstr !== null) {
+       $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 13);
+       $xfer += $output->writeBool($this->validate_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->rely_cstr !== null) {
+       $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 14);
+       $xfer += $output->writeBool($this->rely_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->catName !== null) {
+       $xfer += $output->writeFieldBegin('catName', TType::STRING, 15);
+       $xfer += $output->writeString($this->catName);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class SQLUniqueConstraint {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $catName = null;
+   /**
+    * @var string
+    */
+   public $table_db = null;
+   /**
+    * @var string
+    */
+   public $table_name = null;
+   /**
+    * @var string
+    */
+   public $column_name = null;
+   /**
+    * @var int
+    */
+   public $key_seq = null;
+   /**
+    * @var string
+    */
+   public $uk_name = null;
+   /**
+    * @var bool
+    */
+   public $enable_cstr = null;
+   /**
+    * @var bool
+    */
+   public $validate_cstr = null;
+   /**
+    * @var bool
+    */
+   public $rely_cstr = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'catName',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'table_db',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'table_name',
+           'type' => TType::STRING,
+           ),
+         4 => array(
+           'var' => 'column_name',
+           'type' => TType::STRING,
+           ),
+         5 => array(
+           'var' => 'key_seq',
+           'type' => TType::I32,
+           ),
+         6 => array(
+           'var' => 'uk_name',
+           'type' => TType::STRING,
+           ),
+         7 => array(
+           'var' => 'enable_cstr',
+           'type' => TType::BOOL,
+           ),
+         8 => array(
+           'var' => 'validate_cstr',
+           'type' => TType::BOOL,
+           ),
+         9 => array(
+           'var' => 'rely_cstr',
+           'type' => TType::BOOL,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['catName'])) {
+         $this->catName = $vals['catName'];
+       }
+       if (isset($vals['table_db'])) {
+         $this->table_db = $vals['table_db'];
+       }
+       if (isset($vals['table_name'])) {
+         $this->table_name = $vals['table_name'];
+       }
+       if (isset($vals['column_name'])) {
+         $this->column_name = $vals['column_name'];
+       }
+       if (isset($vals['key_seq'])) {
+         $this->key_seq = $vals['key_seq'];
+       }
+       if (isset($vals['uk_name'])) {
+         $this->uk_name = $vals['uk_name'];
+       }
+       if (isset($vals['enable_cstr'])) {
+         $this->enable_cstr = $vals['enable_cstr'];
+       }
+       if (isset($vals['validate_cstr'])) {
+         $this->validate_cstr = $vals['validate_cstr'];
+       }
+       if (isset($vals['rely_cstr'])) {
+         $this->rely_cstr = $vals['rely_cstr'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'SQLUniqueConstraint';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->catName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_db);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->column_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 5:
+           if ($ftype == TType::I32) {
+             $xfer += $input->readI32($this->key_seq);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 6:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->uk_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 7:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->enable_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 8:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->validate_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 9:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->rely_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('SQLUniqueConstraint');
+     if ($this->catName !== null) {
+       $xfer += $output->writeFieldBegin('catName', TType::STRING, 1);
+       $xfer += $output->writeString($this->catName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->table_db !== null) {
+       $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2);
+       $xfer += $output->writeString($this->table_db);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->table_name !== null) {
+       $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3);
+       $xfer += $output->writeString($this->table_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->column_name !== null) {
+       $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4);
+       $xfer += $output->writeString($this->column_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->key_seq !== null) {
+       $xfer += $output->writeFieldBegin('key_seq', TType::I32, 5);
+       $xfer += $output->writeI32($this->key_seq);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->uk_name !== null) {
+       $xfer += $output->writeFieldBegin('uk_name', TType::STRING, 6);
+       $xfer += $output->writeString($this->uk_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->enable_cstr !== null) {
+       $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 7);
+       $xfer += $output->writeBool($this->enable_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->validate_cstr !== null) {
+       $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 8);
+       $xfer += $output->writeBool($this->validate_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->rely_cstr !== null) {
+       $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 9);
+       $xfer += $output->writeBool($this->rely_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
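Every struct in this generated file follows the same shape: a lazily
initialized static $_TSPEC that maps Thrift field ids to names and wire
types, an array-based constructor, and read()/write() methods that return
the number of bytes transferred. As a minimal round-trip sketch (an
editorial example, not part of the generated file; it assumes the Thrift
0.9.3 PHP runtime and the generated \metastore package are autoloadable,
and the catalog and table names are made up):

<?php
use Thrift\Protocol\TBinaryProtocol;
use Thrift\Transport\TMemoryBuffer;

// Fields left unset stay null and are simply omitted by write().
$uk = new \metastore\SQLUniqueConstraint(array(
  'catName'     => 'hive',
  'table_db'    => 'default',
  'table_name'  => 'orders',
  'column_name' => 'order_id',
  'key_seq'     => 1,              // position of the column within the key
  'uk_name'     => 'uk_orders_id',
  'rely_cstr'   => true,
));

// Serialize into an in-memory transport, then deserialize a copy.
$buffer   = new TMemoryBuffer();
$protocol = new TBinaryProtocol($buffer);
$uk->write($protocol);

$copy = new \metastore\SQLUniqueConstraint();
$copy->read($protocol);          // $copy->uk_name === 'uk_orders_id'

The same pattern repeats verbatim in SQLNotNullConstraint,
SQLDefaultConstraint and SQLCheckConstraint below, which differ only in
their field lists.
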
+ class SQLNotNullConstraint {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $catName = null;
+   /**
+    * @var string
+    */
+   public $table_db = null;
+   /**
+    * @var string
+    */
+   public $table_name = null;
+   /**
+    * @var string
+    */
+   public $column_name = null;
+   /**
+    * @var string
+    */
+   public $nn_name = null;
+   /**
+    * @var bool
+    */
+   public $enable_cstr = null;
+   /**
+    * @var bool
+    */
+   public $validate_cstr = null;
+   /**
+    * @var bool
+    */
+   public $rely_cstr = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'catName',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'table_db',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'table_name',
+           'type' => TType::STRING,
+           ),
+         4 => array(
+           'var' => 'column_name',
+           'type' => TType::STRING,
+           ),
+         5 => array(
+           'var' => 'nn_name',
+           'type' => TType::STRING,
+           ),
+         6 => array(
+           'var' => 'enable_cstr',
+           'type' => TType::BOOL,
+           ),
+         7 => array(
+           'var' => 'validate_cstr',
+           'type' => TType::BOOL,
+           ),
+         8 => array(
+           'var' => 'rely_cstr',
+           'type' => TType::BOOL,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['catName'])) {
+         $this->catName = $vals['catName'];
+       }
+       if (isset($vals['table_db'])) {
+         $this->table_db = $vals['table_db'];
+       }
+       if (isset($vals['table_name'])) {
+         $this->table_name = $vals['table_name'];
+       }
+       if (isset($vals['column_name'])) {
+         $this->column_name = $vals['column_name'];
+       }
+       if (isset($vals['nn_name'])) {
+         $this->nn_name = $vals['nn_name'];
+       }
+       if (isset($vals['enable_cstr'])) {
+         $this->enable_cstr = $vals['enable_cstr'];
+       }
+       if (isset($vals['validate_cstr'])) {
+         $this->validate_cstr = $vals['validate_cstr'];
+       }
+       if (isset($vals['rely_cstr'])) {
+         $this->rely_cstr = $vals['rely_cstr'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'SQLNotNullConstraint';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->catName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_db);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->column_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 5:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->nn_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 6:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->enable_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 7:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->validate_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 8:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->rely_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('SQLNotNullConstraint');
+     if ($this->catName !== null) {
+       $xfer += $output->writeFieldBegin('catName', TType::STRING, 1);
+       $xfer += $output->writeString($this->catName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->table_db !== null) {
+       $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2);
+       $xfer += $output->writeString($this->table_db);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->table_name !== null) {
+       $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3);
+       $xfer += $output->writeString($this->table_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->column_name !== null) {
+       $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4);
+       $xfer += $output->writeString($this->column_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->nn_name !== null) {
+       $xfer += $output->writeFieldBegin('nn_name', TType::STRING, 5);
+       $xfer += $output->writeString($this->nn_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->enable_cstr !== null) {
+       $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 6);
+       $xfer += $output->writeBool($this->enable_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->validate_cstr !== null) {
+       $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 7);
+       $xfer += $output->writeBool($this->validate_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->rely_cstr !== null) {
+       $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 8);
+       $xfer += $output->writeBool($this->rely_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class SQLDefaultConstraint {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $catName = null;
+   /**
+    * @var string
+    */
+   public $table_db = null;
+   /**
+    * @var string
+    */
+   public $table_name = null;
+   /**
+    * @var string
+    */
+   public $column_name = null;
+   /**
+    * @var string
+    */
+   public $default_value = null;
+   /**
+    * @var string
+    */
+   public $dc_name = null;
+   /**
+    * @var bool
+    */
+   public $enable_cstr = null;
+   /**
+    * @var bool
+    */
+   public $validate_cstr = null;
+   /**
+    * @var bool
+    */
+   public $rely_cstr = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'catName',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'table_db',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'table_name',
+           'type' => TType::STRING,
+           ),
+         4 => array(
+           'var' => 'column_name',
+           'type' => TType::STRING,
+           ),
+         5 => array(
+           'var' => 'default_value',
+           'type' => TType::STRING,
+           ),
+         6 => array(
+           'var' => 'dc_name',
+           'type' => TType::STRING,
+           ),
+         7 => array(
+           'var' => 'enable_cstr',
+           'type' => TType::BOOL,
+           ),
+         8 => array(
+           'var' => 'validate_cstr',
+           'type' => TType::BOOL,
+           ),
+         9 => array(
+           'var' => 'rely_cstr',
+           'type' => TType::BOOL,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['catName'])) {
+         $this->catName = $vals['catName'];
+       }
+       if (isset($vals['table_db'])) {
+         $this->table_db = $vals['table_db'];
+       }
+       if (isset($vals['table_name'])) {
+         $this->table_name = $vals['table_name'];
+       }
+       if (isset($vals['column_name'])) {
+         $this->column_name = $vals['column_name'];
+       }
+       if (isset($vals['default_value'])) {
+         $this->default_value = $vals['default_value'];
+       }
+       if (isset($vals['dc_name'])) {
+         $this->dc_name = $vals['dc_name'];
+       }
+       if (isset($vals['enable_cstr'])) {
+         $this->enable_cstr = $vals['enable_cstr'];
+       }
+       if (isset($vals['validate_cstr'])) {
+         $this->validate_cstr = $vals['validate_cstr'];
+       }
+       if (isset($vals['rely_cstr'])) {
+         $this->rely_cstr = $vals['rely_cstr'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'SQLDefaultConstraint';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->catName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_db);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->column_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 5:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->default_value);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 6:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->dc_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 7:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->enable_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 8:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->validate_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 9:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->rely_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('SQLDefaultConstraint');
+     if ($this->catName !== null) {
+       $xfer += $output->writeFieldBegin('catName', TType::STRING, 1);
+       $xfer += $output->writeString($this->catName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->table_db !== null) {
+       $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2);
+       $xfer += $output->writeString($this->table_db);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->table_name !== null) {
+       $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3);
+       $xfer += $output->writeString($this->table_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->column_name !== null) {
+       $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4);
+       $xfer += $output->writeString($this->column_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->default_value !== null) {
+       $xfer += $output->writeFieldBegin('default_value', TType::STRING, 5);
+       $xfer += $output->writeString($this->default_value);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->dc_name !== null) {
+       $xfer += $output->writeFieldBegin('dc_name', TType::STRING, 6);
+       $xfer += $output->writeString($this->dc_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->enable_cstr !== null) {
+       $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 7);
+       $xfer += $output->writeBool($this->enable_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->validate_cstr !== null) {
+       $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 8);
+       $xfer += $output->writeBool($this->validate_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->rely_cstr !== null) {
+       $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 9);
+       $xfer += $output->writeBool($this->rely_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class SQLCheckConstraint {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $catName = null;
+   /**
+    * @var string
+    */
+   public $table_db = null;
+   /**
+    * @var string
+    */
+   public $table_name = null;
+   /**
+    * @var string
+    */
+   public $column_name = null;
+   /**
+    * @var string
+    */
+   public $check_expression = null;
+   /**
+    * @var string
+    */
+   public $dc_name = null;
+   /**
+    * @var bool
+    */
+   public $enable_cstr = null;
+   /**
+    * @var bool
+    */
+   public $validate_cstr = null;
+   /**
+    * @var bool
+    */
+   public $rely_cstr = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'catName',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'table_db',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'table_name',
+           'type' => TType::STRING,
+           ),
+         4 => array(
+           'var' => 'column_name',
+           'type' => TType::STRING,
+           ),
+         5 => array(
+           'var' => 'check_expression',
+           'type' => TType::STRING,
+           ),
+         6 => array(
+           'var' => 'dc_name',
+           'type' => TType::STRING,
+           ),
+         7 => array(
+           'var' => 'enable_cstr',
+           'type' => TType::BOOL,
+           ),
+         8 => array(
+           'var' => 'validate_cstr',
+           'type' => TType::BOOL,
+           ),
+         9 => array(
+           'var' => 'rely_cstr',
+           'type' => TType::BOOL,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['catName'])) {
+         $this->catName = $vals['catName'];
+       }
+       if (isset($vals['table_db'])) {
+         $this->table_db = $vals['table_db'];
+       }
+       if (isset($vals['table_name'])) {
+         $this->table_name = $vals['table_name'];
+       }
+       if (isset($vals['column_name'])) {
+         $this->column_name = $vals['column_name'];
+       }
+       if (isset($vals['check_expression'])) {
+         $this->check_expression = $vals['check_expression'];
+       }
+       if (isset($vals['dc_name'])) {
+         $this->dc_name = $vals['dc_name'];
+       }
+       if (isset($vals['enable_cstr'])) {
+         $this->enable_cstr = $vals['enable_cstr'];
+       }
+       if (isset($vals['validate_cstr'])) {
+         $this->validate_cstr = $vals['validate_cstr'];
+       }
+       if (isset($vals['rely_cstr'])) {
+         $this->rely_cstr = $vals['rely_cstr'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'SQLCheckConstraint';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->catName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_db);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->table_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->column_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 5:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->check_expression);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 6:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->dc_name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 7:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->enable_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 8:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->validate_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 9:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->rely_cstr);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('SQLCheckConstraint');
+     if ($this->catName !== null) {
+       $xfer += $output->writeFieldBegin('catName', TType::STRING, 1);
+       $xfer += $output->writeString($this->catName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->table_db !== null) {
+       $xfer += $output->writeFieldBegin('table_db', TType::STRING, 2);
+       $xfer += $output->writeString($this->table_db);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->table_name !== null) {
+       $xfer += $output->writeFieldBegin('table_name', TType::STRING, 3);
+       $xfer += $output->writeString($this->table_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->column_name !== null) {
+       $xfer += $output->writeFieldBegin('column_name', TType::STRING, 4);
+       $xfer += $output->writeString($this->column_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->check_expression !== null) {
+       $xfer += $output->writeFieldBegin('check_expression', TType::STRING, 5);
+       $xfer += $output->writeString($this->check_expression);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->dc_name !== null) {
+       $xfer += $output->writeFieldBegin('dc_name', TType::STRING, 6);
+       $xfer += $output->writeString($this->dc_name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->enable_cstr !== null) {
+       $xfer += $output->writeFieldBegin('enable_cstr', TType::BOOL, 7);
+       $xfer += $output->writeBool($this->enable_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->validate_cstr !== null) {
+       $xfer += $output->writeFieldBegin('validate_cstr', TType::BOOL, 8);
+       $xfer += $output->writeBool($this->validate_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->rely_cstr !== null) {
+       $xfer += $output->writeFieldBegin('rely_cstr', TType::BOOL, 9);
+       $xfer += $output->writeBool($this->rely_cstr);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class Type {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $name = null;
+   /**
+    * @var string
+    */
+   public $type1 = null;
+   /**
+    * @var string
+    */
+   public $type2 = null;
+   /**
+    * @var \metastore\FieldSchema[]
+    */
+   public $fields = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'name',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'type1',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'type2',
+           'type' => TType::STRING,
+           ),
+         4 => array(
+           'var' => 'fields',
+           'type' => TType::LST,
+           'etype' => TType::STRUCT,
+           'elem' => array(
+             'type' => TType::STRUCT,
+             'class' => '\metastore\FieldSchema',
+             ),
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['name'])) {
+         $this->name = $vals['name'];
+       }
+       if (isset($vals['type1'])) {
+         $this->type1 = $vals['type1'];
+       }
+       if (isset($vals['type2'])) {
+         $this->type2 = $vals['type2'];
+       }
+       if (isset($vals['fields'])) {
+         $this->fields = $vals['fields'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'Type';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->name);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->type1);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->type2);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::LST) {
+             $this->fields = array();
+             $_size0 = 0;
+             $_etype3 = 0;
+             $xfer += $input->readListBegin($_etype3, $_size0);
+             for ($_i4 = 0; $_i4 < $_size0; ++$_i4)
+             {
+               $elem5 = null;
+               $elem5 = new \metastore\FieldSchema();
+               $xfer += $elem5->read($input);
+               $this->fields []= $elem5;
+             }
+             $xfer += $input->readListEnd();
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('Type');
+     if ($this->name !== null) {
+       $xfer += $output->writeFieldBegin('name', TType::STRING, 1);
+       $xfer += $output->writeString($this->name);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->type1 !== null) {
+       $xfer += $output->writeFieldBegin('type1', TType::STRING, 2);
+       $xfer += $output->writeString($this->type1);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->type2 !== null) {
+       $xfer += $output->writeFieldBegin('type2', TType::STRING, 3);
+       $xfer += $output->writeString($this->type2);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->fields !== null) {
+       if (!is_array($this->fields)) {
+         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+       }
+       $xfer += $output->writeFieldBegin('fields', TType::LST, 4);
+       {
+         $output->writeListBegin(TType::STRUCT, count($this->fields));
+         {
+           foreach ($this->fields as $iter6)
+           {
+             $xfer += $iter6->write($output);
+           }
+         }
+         $output->writeListEnd();
+       }
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
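Type is the first struct in this hunk with a container field: fields is a
TType::LST of FieldSchema structs, so write() guards it with an is_array()
check (throwing TProtocolException::INVALID_DATA otherwise) and read()
rebuilds each element through FieldSchema::read(). A short sketch of the
happy path, assuming FieldSchema accepts 'name' and 'type' keys as defined
earlier in this generated file:

<?php
$pair = new \metastore\Type(array(
  'name'   => 'pair',
  'fields' => array(
    new \metastore\FieldSchema(array('name' => 'first',  'type' => 'string')),
    new \metastore\FieldSchema(array('name' => 'second', 'type' => 'int')),
  ),
));
// write() emits the list header (element type and count), then delegates
// to each element's own write(); passing a non-array here throws instead.
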
+ class HiveObjectRef {
+   static $_TSPEC;
+ 
+   /**
+    * @var int
+    */
+   public $objectType = null;
+   /**
+    * @var string
+    */
+   public $dbName = null;
+   /**
+    * @var string
+    */
+   public $objectName = null;
+   /**
+    * @var string[]
+    */
+   public $partValues = null;
+   /**
+    * @var string
+    */
+   public $columnName = null;
+   /**
+    * @var string
+    */
+   public $catName = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'objectType',
+           'type' => TType::I32,
+           ),
+         2 => array(
+           'var' => 'dbName',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'objectName',
+           'type' => TType::STRING,
+           ),
+         4 => array(
+           'var' => 'partValues',
+           'type' => TType::LST,
+           'etype' => TType::STRING,
+           'elem' => array(
+             'type' => TType::STRING,
+             ),
+           ),
+         5 => array(
+           'var' => 'columnName',
+           'type' => TType::STRING,
+           ),
+         6 => array(
+           'var' => 'catName',
+           'type' => TType::STRING,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['objectType'])) {
+         $this->objectType = $vals['objectType'];
+       }
+       if (isset($vals['dbName'])) {
+         $this->dbName = $vals['dbName'];
+       }
+       if (isset($vals['objectName'])) {
+         $this->objectName = $vals['objectName'];
+       }
+       if (isset($vals['partValues'])) {
+         $this->partValues = $vals['partValues'];
+       }
+       if (isset($vals['columnName'])) {
+         $this->columnName = $vals['columnName'];
+       }
+       if (isset($vals['catName'])) {
+         $this->catName = $vals['catName'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'HiveObjectRef';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::I32) {
+             $xfer += $input->readI32($this->objectType);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->dbName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->objectName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::LST) {
+             $this->partValues = array();
+             $_size7 = 0;
+             $_etype10 = 0;
+             $xfer += $input->readListBegin($_etype10, $_size7);
+             for ($_i11 = 0; $_i11 < $_size7; ++$_i11)
+             {
+               $elem12 = null;
+               $xfer += $input->readString($elem12);
+               $this->partValues []= $elem12;
+             }
+             $xfer += $input->readListEnd();
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 5:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->columnName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 6:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->catName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('HiveObjectRef');
+     if ($this->objectType !== null) {
+       $xfer += $output->writeFieldBegin('objectType', TType::I32, 1);
+       $xfer += $output->writeI32($this->objectType);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->dbName !== null) {
+       $xfer += $output->writeFieldBegin('dbName', TType::STRING, 2);
+       $xfer += $output->writeString($this->dbName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->objectName !== null) {
+       $xfer += $output->writeFieldBegin('objectName', TType::STRING, 3);
+       $xfer += $output->writeString($this->objectName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->partValues !== null) {
+       if (!is_array($this->partValues)) {
+         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+       }
+       $xfer += $output->writeFieldBegin('partValues', TType::LST, 4);
+       {
+         $output->writeListBegin(TType::STRING, count($this->partValues));
+         {
+           foreach ($this->partValues as $iter13)
+           {
+             $xfer += $output->writeString($iter13);
+           }
+         }
+         $output->writeListEnd();
+       }
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->columnName !== null) {
+       $xfer += $output->writeFieldBegin('columnName', TType::STRING, 5);
+       $xfer += $output->writeString($this->columnName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->catName !== null) {
+       $xfer += $output->writeFieldBegin('catName', TType::STRING, 6);
+       $xfer += $output->writeString($this->catName);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class PrivilegeGrantInfo {
+   static $_TSPEC;
+ 
+   /**
+    * @var string
+    */
+   public $privilege = null;
+   /**
+    * @var int
+    */
+   public $createTime = null;
+   /**
+    * @var string
+    */
+   public $grantor = null;
+   /**
+    * @var int
+    */
+   public $grantorType = null;
+   /**
+    * @var bool
+    */
+   public $grantOption = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'privilege',
+           'type' => TType::STRING,
+           ),
+         2 => array(
+           'var' => 'createTime',
+           'type' => TType::I32,
+           ),
+         3 => array(
+           'var' => 'grantor',
+           'type' => TType::STRING,
+           ),
+         4 => array(
+           'var' => 'grantorType',
+           'type' => TType::I32,
+           ),
+         5 => array(
+           'var' => 'grantOption',
+           'type' => TType::BOOL,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['privilege'])) {
+         $this->privilege = $vals['privilege'];
+       }
+       if (isset($vals['createTime'])) {
+         $this->createTime = $vals['createTime'];
+       }
+       if (isset($vals['grantor'])) {
+         $this->grantor = $vals['grantor'];
+       }
+       if (isset($vals['grantorType'])) {
+         $this->grantorType = $vals['grantorType'];
+       }
+       if (isset($vals['grantOption'])) {
+         $this->grantOption = $vals['grantOption'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'PrivilegeGrantInfo';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->privilege);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::I32) {
+             $xfer += $input->readI32($this->createTime);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->grantor);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::I32) {
+             $xfer += $input->readI32($this->grantorType);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 5:
+           if ($ftype == TType::BOOL) {
+             $xfer += $input->readBool($this->grantOption);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('PrivilegeGrantInfo');
+     if ($this->privilege !== null) {
+       $xfer += $output->writeFieldBegin('privilege', TType::STRING, 1);
+       $xfer += $output->writeString($this->privilege);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->createTime !== null) {
+       $xfer += $output->writeFieldBegin('createTime', TType::I32, 2);
+       $xfer += $output->writeI32($this->createTime);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->grantor !== null) {
+       $xfer += $output->writeFieldBegin('grantor', TType::STRING, 3);
+       $xfer += $output->writeString($this->grantor);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->grantorType !== null) {
+       $xfer += $output->writeFieldBegin('grantorType', TType::I32, 4);
+       $xfer += $output->writeI32($this->grantorType);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->grantOption !== null) {
+       $xfer += $output->writeFieldBegin('grantOption', TType::BOOL, 5);
+       $xfer += $output->writeBool($this->grantOption);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class HiveObjectPrivilege {
+   static $_TSPEC;
+ 
+   /**
+    * @var \metastore\HiveObjectRef
+    */
+   public $hiveObject = null;
+   /**
+    * @var string
+    */
+   public $principalName = null;
+   /**
+    * @var int
+    */
+   public $principalType = null;
+   /**
+    * @var \metastore\PrivilegeGrantInfo
+    */
+   public $grantInfo = null;
+   /**
+    * @var string
+    */
+   public $authorizer = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'hiveObject',
+           'type' => TType::STRUCT,
+           'class' => '\metastore\HiveObjectRef',
+           ),
+         2 => array(
+           'var' => 'principalName',
+           'type' => TType::STRING,
+           ),
+         3 => array(
+           'var' => 'principalType',
+           'type' => TType::I32,
+           ),
+         4 => array(
+           'var' => 'grantInfo',
+           'type' => TType::STRUCT,
+           'class' => '\metastore\PrivilegeGrantInfo',
+           ),
+         5 => array(
+           'var' => 'authorizer',
+           'type' => TType::STRING,
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['hiveObject'])) {
+         $this->hiveObject = $vals['hiveObject'];
+       }
+       if (isset($vals['principalName'])) {
+         $this->principalName = $vals['principalName'];
+       }
+       if (isset($vals['principalType'])) {
+         $this->principalType = $vals['principalType'];
+       }
+       if (isset($vals['grantInfo'])) {
+         $this->grantInfo = $vals['grantInfo'];
+       }
+       if (isset($vals['authorizer'])) {
+         $this->authorizer = $vals['authorizer'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'HiveObjectPrivilege';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::STRUCT) {
+             $this->hiveObject = new \metastore\HiveObjectRef();
+             $xfer += $this->hiveObject->read($input);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->principalName);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::I32) {
+             $xfer += $input->readI32($this->principalType);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 4:
+           if ($ftype == TType::STRUCT) {
+             $this->grantInfo = new \metastore\PrivilegeGrantInfo();
+             $xfer += $this->grantInfo->read($input);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 5:
+           if ($ftype == TType::STRING) {
+             $xfer += $input->readString($this->authorizer);
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('HiveObjectPrivilege');
+     if ($this->hiveObject !== null) {
+       if (!is_object($this->hiveObject)) {
+         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+       }
+       $xfer += $output->writeFieldBegin('hiveObject', TType::STRUCT, 1);
+       $xfer += $this->hiveObject->write($output);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->principalName !== null) {
+       $xfer += $output->writeFieldBegin('principalName', TType::STRING, 2);
+       $xfer += $output->writeString($this->principalName);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->principalType !== null) {
+       $xfer += $output->writeFieldBegin('principalType', TType::I32, 3);
+       $xfer += $output->writeI32($this->principalType);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->grantInfo !== null) {
+       if (!is_object($this->grantInfo)) {
+         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+       }
+       $xfer += $output->writeFieldBegin('grantInfo', TType::STRUCT, 4);
+       $xfer += $this->grantInfo->write($output);
+       $xfer += $output->writeFieldEnd();
+     }
+     if ($this->authorizer !== null) {
+       $xfer += $output->writeFieldBegin('authorizer', TType::STRING, 5);
+       $xfer += $output->writeString($this->authorizer);
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
+ class PrivilegeBag {
+   static $_TSPEC;
+ 
+   /**
+    * @var \metastore\HiveObjectPrivilege[]
+    */
+   public $privileges = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'privileges',
+           'type' => TType::LST,
+           'etype' => TType::STRUCT,
+           'elem' => array(
+             'type' => TType::STRUCT,
+             'class' => '\metastore\HiveObjectPrivilege',
+             ),
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['privileges'])) {
+         $this->privileges = $vals['privileges'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'PrivilegeBag';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::LST) {
+             $this->privileges = array();
+             $_size14 = 0;
+             $_etype17 = 0;
+             $xfer += $input->readListBegin($_etype17, $_size14);
+             for ($_i18 = 0; $_i18 < $_size14; ++$_i18)
+             {
+               $elem19 = null;
+               $elem19 = new \metastore\HiveObjectPrivilege();
+               $xfer += $elem19->read($input);
+               $this->privileges []= $elem19;
+             }
+             $xfer += $input->readListEnd();
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         default:
+           $xfer += $input->skip($ftype);
+           break;
+       }
+       $xfer += $input->readFieldEnd();
+     }
+     $xfer += $input->readStructEnd();
+     return $xfer;
+   }
+ 
+   public function write($output) {
+     $xfer = 0;
+     $xfer += $output->writeStructBegin('PrivilegeBag');
+     if ($this->privileges !== null) {
+       if (!is_array($this->privileges)) {
+         throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA);
+       }
+       $xfer += $output->writeFieldBegin('privileges', TType::LST, 1);
+       {
+         $output->writeListBegin(TType::STRUCT, count($this->privileges));
+         {
+           foreach ($this->privileges as $iter20)
+           {
+             $xfer += $iter20->write($output);
+           }
+         }
+         $output->writeListEnd();
+       }
+       $xfer += $output->writeFieldEnd();
+     }
+     $xfer += $output->writeFieldStop();
+     $xfer += $output->writeStructEnd();
+     return $xfer;
+   }
+ 
+ }
+ 
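PrivilegeBag ties the three preceding structs together: each
HiveObjectPrivilege nests a HiveObjectRef and a PrivilegeGrantInfo as
TType::STRUCT fields, which write() serializes recursively behind the
is_object() guards above. A hypothetical SELECT grant, end to end (the
HiveObjectType and PrincipalType constants are assumed to come from the
enum wrapper classes generated earlier in this file; all names are made
up):

<?php
$grant = new \metastore\HiveObjectPrivilege(array(
  'hiveObject' => new \metastore\HiveObjectRef(array(
    'objectType' => \metastore\HiveObjectType::TABLE,
    'dbName'     => 'default',
    'objectName' => 'orders',
    'catName'    => 'hive',
  )),
  'principalName' => 'analyst',
  'principalType' => \metastore\PrincipalType::USER,
  'grantInfo'     => new \metastore\PrivilegeGrantInfo(array(
    'privilege'   => 'SELECT',
    'createTime'  => time(),
    'grantor'     => 'admin',
    'grantorType' => \metastore\PrincipalType::USER,
    'grantOption' => false,
  )),
));

// The bag is just a list wrapper; serializing it walks the whole tree.
$bag = new \metastore\PrivilegeBag(array('privileges' => array($grant)));
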
+ class PrincipalPrivilegeSet {
+   static $_TSPEC;
+ 
+   /**
+    * @var array
+    */
+   public $userPrivileges = null;
+   /**
+    * @var array
+    */
+   public $groupPrivileges = null;
+   /**
+    * @var array
+    */
+   public $rolePrivileges = null;
+ 
+   public function __construct($vals=null) {
+     if (!isset(self::$_TSPEC)) {
+       self::$_TSPEC = array(
+         1 => array(
+           'var' => 'userPrivileges',
+           'type' => TType::MAP,
+           'ktype' => TType::STRING,
+           'vtype' => TType::LST,
+           'key' => array(
+             'type' => TType::STRING,
+           ),
+           'val' => array(
+             'type' => TType::LST,
+             'etype' => TType::STRUCT,
+             'elem' => array(
+               'type' => TType::STRUCT,
+               'class' => '\metastore\PrivilegeGrantInfo',
+               ),
+             ),
+           ),
+         2 => array(
+           'var' => 'groupPrivileges',
+           'type' => TType::MAP,
+           'ktype' => TType::STRING,
+           'vtype' => TType::LST,
+           'key' => array(
+             'type' => TType::STRING,
+           ),
+           'val' => array(
+             'type' => TType::LST,
+             'etype' => TType::STRUCT,
+             'elem' => array(
+               'type' => TType::STRUCT,
+               'class' => '\metastore\PrivilegeGrantInfo',
+               ),
+             ),
+           ),
+         3 => array(
+           'var' => 'rolePrivileges',
+           'type' => TType::MAP,
+           'ktype' => TType::STRING,
+           'vtype' => TType::LST,
+           'key' => array(
+             'type' => TType::STRING,
+           ),
+           'val' => array(
+             'type' => TType::LST,
+             'etype' => TType::STRUCT,
+             'elem' => array(
+               'type' => TType::STRUCT,
+               'class' => '\metastore\PrivilegeGrantInfo',
+               ),
+             ),
+           ),
+         );
+     }
+     if (is_array($vals)) {
+       if (isset($vals['userPrivileges'])) {
+         $this->userPrivileges = $vals['userPrivileges'];
+       }
+       if (isset($vals['groupPrivileges'])) {
+         $this->groupPrivileges = $vals['groupPrivileges'];
+       }
+       if (isset($vals['rolePrivileges'])) {
+         $this->rolePrivileges = $vals['rolePrivileges'];
+       }
+     }
+   }
+ 
+   public function getName() {
+     return 'PrincipalPrivilegeSet';
+   }
+ 
+   public function read($input)
+   {
+     $xfer = 0;
+     $fname = null;
+     $ftype = 0;
+     $fid = 0;
+     $xfer += $input->readStructBegin($fname);
+     while (true)
+     {
+       $xfer += $input->readFieldBegin($fname, $ftype, $fid);
+       if ($ftype == TType::STOP) {
+         break;
+       }
+       switch ($fid)
+       {
+         case 1:
+           if ($ftype == TType::MAP) {
+             $this->userPrivileges = array();
+             $_size21 = 0;
+             $_ktype22 = 0;
+             $_vtype23 = 0;
+             $xfer += $input->readMapBegin($_ktype22, $_vtype23, $_size21);
+             for ($_i25 = 0; $_i25 < $_size21; ++$_i25)
+             {
+               $key26 = '';
+               $val27 = array();
+               $xfer += $input->readString($key26);
+               $val27 = array();
+               $_size28 = 0;
+               $_etype31 = 0;
+               $xfer += $input->readListBegin($_etype31, $_size28);
+               for ($_i32 = 0; $_i32 < $_size28; ++$_i32)
+               {
+                 $elem33 = null;
+                 $elem33 = new \metastore\PrivilegeGrantInfo();
+                 $xfer += $elem33->read($input);
+                 $val27 []= $elem33;
+               }
+               $xfer += $input->readListEnd();
+               $this->userPrivileges[$key26] = $val27;
+             }
+             $xfer += $input->readMapEnd();
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 2:
+           if ($ftype == TType::MAP) {
+             $this->groupPrivileges = array();
+             $_size34 = 0;
+             $_ktype35 = 0;
+             $_vtype36 = 0;
+             $xfer += $input->readMapBegin($_ktype35, $_vtype36, $_size34);
+             for ($_i38 = 0; $_i38 < $_size34; ++$_i38)
+             {
+               $key39 = '';
+               $val40 = array();
+               $xfer += $input->readString($key39);
+               $val40 = array();
+               $_size41 = 0;
+               $_etype44 = 0;
+               $xfer += $input->readListBegin($_etype44, $_size41);
+               for ($_i45 = 0; $_i45 < $_size41; ++$_i45)
+               {
+                 $elem46 = null;
+                 $elem46 = new \metastore\PrivilegeGrantInfo();
+                 $xfer += $elem46->read($input);
+                 $val40 []= $elem46;
+               }
+               $xfer += $input->readListEnd();
+               $this->groupPrivileges[$key39] = $val40;
+             }
+             $xfer += $input->readMapEnd();
+           } else {
+             $xfer += $input->skip($ftype);
+           }
+           break;
+         case 3:
+           if ($ftype == TType::MAP) {
+             $this->rolePrivileges = array();
+             $_size47 = 0;
+             $_ktype48 = 0;
+             $_vtype49 = 0;
+             $xfer += $input->readMapBegin($_ktype48, $_vtype49, $_size47);
+             for ($_i51 = 0; $_i51 < $_size47; ++$_i51)
+          

<TRUNCATED>

[09/91] [abbrv] [partial] hive git commit: HIVE-20097 : Convert standalone-metastore to a submodule (Alexander Kolbasov reviewed by Vihang Karajgaonkar)

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
new file mode 100644
index 0000000..5807618
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
@@ -0,0 +1,1016 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionWithoutSD implements org.apache.thrift.TBase<PartitionWithoutSD, PartitionWithoutSD._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionWithoutSD> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionWithoutSD");
+
+  private static final org.apache.thrift.protocol.TField VALUES_FIELD_DESC = new org.apache.thrift.protocol.TField("values", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField CREATE_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("createTime", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField LAST_ACCESS_TIME_FIELD_DESC = new org.apache.thrift.protocol.TField("lastAccessTime", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField RELATIVE_PATH_FIELD_DESC = new org.apache.thrift.protocol.TField("relativePath", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField PARAMETERS_FIELD_DESC = new org.apache.thrift.protocol.TField("parameters", org.apache.thrift.protocol.TType.MAP, (short)5);
+  private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionWithoutSDStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionWithoutSDTupleSchemeFactory());
+  }
+
+  private List<String> values; // required
+  private int createTime; // required
+  private int lastAccessTime; // required
+  private String relativePath; // required
+  private Map<String,String> parameters; // required
+  private PrincipalPrivilegeSet privileges; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    VALUES((short)1, "values"),
+    CREATE_TIME((short)2, "createTime"),
+    LAST_ACCESS_TIME((short)3, "lastAccessTime"),
+    RELATIVE_PATH((short)4, "relativePath"),
+    PARAMETERS((short)5, "parameters"),
+    PRIVILEGES((short)6, "privileges");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // VALUES
+          return VALUES;
+        case 2: // CREATE_TIME
+          return CREATE_TIME;
+        case 3: // LAST_ACCESS_TIME
+          return LAST_ACCESS_TIME;
+        case 4: // RELATIVE_PATH
+          return RELATIVE_PATH;
+        case 5: // PARAMETERS
+          return PARAMETERS;
+        case 6: // PRIVILEGES
+          return PRIVILEGES;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __CREATETIME_ISSET_ID = 0;
+  private static final int __LASTACCESSTIME_ISSET_ID = 1;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.PRIVILEGES};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.VALUES, new org.apache.thrift.meta_data.FieldMetaData("values", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.CREATE_TIME, new org.apache.thrift.meta_data.FieldMetaData("createTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.LAST_ACCESS_TIME, new org.apache.thrift.meta_data.FieldMetaData("lastAccessTime", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
+    tmpMap.put(_Fields.RELATIVE_PATH, new org.apache.thrift.meta_data.FieldMetaData("relativePath", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.PARAMETERS, new org.apache.thrift.meta_data.FieldMetaData("parameters", org.apache.thrift.TFieldRequirementType.DEFAULT, 
+        new org.apache.thrift.meta_data.MapMetaData(org.apache.thrift.protocol.TType.MAP, 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING), 
+            new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))));
+    tmpMap.put(_Fields.PRIVILEGES, new org.apache.thrift.meta_data.FieldMetaData("privileges", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionWithoutSD.class, metaDataMap);
+  }
+
+  public PartitionWithoutSD() {
+  }
+
+  public PartitionWithoutSD(
+    List<String> values,
+    int createTime,
+    int lastAccessTime,
+    String relativePath,
+    Map<String,String> parameters)
+  {
+    this();
+    this.values = values;
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+    this.lastAccessTime = lastAccessTime;
+    setLastAccessTimeIsSet(true);
+    this.relativePath = relativePath;
+    this.parameters = parameters;
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionWithoutSD(PartitionWithoutSD other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetValues()) {
+      List<String> __this__values = new ArrayList<String>(other.values);
+      this.values = __this__values;
+    }
+    this.createTime = other.createTime;
+    this.lastAccessTime = other.lastAccessTime;
+    if (other.isSetRelativePath()) {
+      this.relativePath = other.relativePath;
+    }
+    if (other.isSetParameters()) {
+      Map<String,String> __this__parameters = new HashMap<String,String>(other.parameters);
+      this.parameters = __this__parameters;
+    }
+    if (other.isSetPrivileges()) {
+      this.privileges = new PrincipalPrivilegeSet(other.privileges);
+    }
+  }
+
+  public PartitionWithoutSD deepCopy() {
+    return new PartitionWithoutSD(this);
+  }
+
+  @Override
+  public void clear() {
+    this.values = null;
+    setCreateTimeIsSet(false);
+    this.createTime = 0;
+    setLastAccessTimeIsSet(false);
+    this.lastAccessTime = 0;
+    this.relativePath = null;
+    this.parameters = null;
+    this.privileges = null;
+  }
+
+  public int getValuesSize() {
+    return (this.values == null) ? 0 : this.values.size();
+  }
+
+  public java.util.Iterator<String> getValuesIterator() {
+    return (this.values == null) ? null : this.values.iterator();
+  }
+
+  public void addToValues(String elem) {
+    if (this.values == null) {
+      this.values = new ArrayList<String>();
+    }
+    this.values.add(elem);
+  }
+
+  public List<String> getValues() {
+    return this.values;
+  }
+
+  public void setValues(List<String> values) {
+    this.values = values;
+  }
+
+  public void unsetValues() {
+    this.values = null;
+  }
+
+  /** Returns true if field values is set (has been assigned a value) and false otherwise */
+  public boolean isSetValues() {
+    return this.values != null;
+  }
+
+  public void setValuesIsSet(boolean value) {
+    if (!value) {
+      this.values = null;
+    }
+  }
+
+  public int getCreateTime() {
+    return this.createTime;
+  }
+
+  public void setCreateTime(int createTime) {
+    this.createTime = createTime;
+    setCreateTimeIsSet(true);
+  }
+
+  public void unsetCreateTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  /** Returns true if field createTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetCreateTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __CREATETIME_ISSET_ID);
+  }
+
+  public void setCreateTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __CREATETIME_ISSET_ID, value);
+  }
+
+  public int getLastAccessTime() {
+    return this.lastAccessTime;
+  }
+
+  public void setLastAccessTime(int lastAccessTime) {
+    this.lastAccessTime = lastAccessTime;
+    setLastAccessTimeIsSet(true);
+  }
+
+  public void unsetLastAccessTime() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID);
+  }
+
+  /** Returns true if field lastAccessTime is set (has been assigned a value) and false otherwise */
+  public boolean isSetLastAccessTime() {
+    return EncodingUtils.testBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID);
+  }
+
+  public void setLastAccessTimeIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __LASTACCESSTIME_ISSET_ID, value);
+  }
+
+  public String getRelativePath() {
+    return this.relativePath;
+  }
+
+  public void setRelativePath(String relativePath) {
+    this.relativePath = relativePath;
+  }
+
+  public void unsetRelativePath() {
+    this.relativePath = null;
+  }
+
+  /** Returns true if field relativePath is set (has been assigned a value) and false otherwise */
+  public boolean isSetRelativePath() {
+    return this.relativePath != null;
+  }
+
+  public void setRelativePathIsSet(boolean value) {
+    if (!value) {
+      this.relativePath = null;
+    }
+  }
+
+  public int getParametersSize() {
+    return (this.parameters == null) ? 0 : this.parameters.size();
+  }
+
+  public void putToParameters(String key, String val) {
+    if (this.parameters == null) {
+      this.parameters = new HashMap<String,String>();
+    }
+    this.parameters.put(key, val);
+  }
+
+  public Map<String,String> getParameters() {
+    return this.parameters;
+  }
+
+  public void setParameters(Map<String,String> parameters) {
+    this.parameters = parameters;
+  }
+
+  public void unsetParameters() {
+    this.parameters = null;
+  }
+
+  /** Returns true if field parameters is set (has been assigned a value) and false otherwise */
+  public boolean isSetParameters() {
+    return this.parameters != null;
+  }
+
+  public void setParametersIsSet(boolean value) {
+    if (!value) {
+      this.parameters = null;
+    }
+  }
+
+  public PrincipalPrivilegeSet getPrivileges() {
+    return this.privileges;
+  }
+
+  public void setPrivileges(PrincipalPrivilegeSet privileges) {
+    this.privileges = privileges;
+  }
+
+  public void unsetPrivileges() {
+    this.privileges = null;
+  }
+
+  /** Returns true if field privileges is set (has been assigned a value) and false otherwise */
+  public boolean isSetPrivileges() {
+    return this.privileges != null;
+  }
+
+  public void setPrivilegesIsSet(boolean value) {
+    if (!value) {
+      this.privileges = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case VALUES:
+      if (value == null) {
+        unsetValues();
+      } else {
+        setValues((List<String>)value);
+      }
+      break;
+
+    case CREATE_TIME:
+      if (value == null) {
+        unsetCreateTime();
+      } else {
+        setCreateTime((Integer)value);
+      }
+      break;
+
+    case LAST_ACCESS_TIME:
+      if (value == null) {
+        unsetLastAccessTime();
+      } else {
+        setLastAccessTime((Integer)value);
+      }
+      break;
+
+    case RELATIVE_PATH:
+      if (value == null) {
+        unsetRelativePath();
+      } else {
+        setRelativePath((String)value);
+      }
+      break;
+
+    case PARAMETERS:
+      if (value == null) {
+        unsetParameters();
+      } else {
+        setParameters((Map<String,String>)value);
+      }
+      break;
+
+    case PRIVILEGES:
+      if (value == null) {
+        unsetPrivileges();
+      } else {
+        setPrivileges((PrincipalPrivilegeSet)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case VALUES:
+      return getValues();
+
+    case CREATE_TIME:
+      return getCreateTime();
+
+    case LAST_ACCESS_TIME:
+      return getLastAccessTime();
+
+    case RELATIVE_PATH:
+      return getRelativePath();
+
+    case PARAMETERS:
+      return getParameters();
+
+    case PRIVILEGES:
+      return getPrivileges();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case VALUES:
+      return isSetValues();
+    case CREATE_TIME:
+      return isSetCreateTime();
+    case LAST_ACCESS_TIME:
+      return isSetLastAccessTime();
+    case RELATIVE_PATH:
+      return isSetRelativePath();
+    case PARAMETERS:
+      return isSetParameters();
+    case PRIVILEGES:
+      return isSetPrivileges();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionWithoutSD)
+      return this.equals((PartitionWithoutSD)that);
+    return false;
+  }
+
+  public boolean equals(PartitionWithoutSD that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_values = true && this.isSetValues();
+    boolean that_present_values = true && that.isSetValues();
+    if (this_present_values || that_present_values) {
+      if (!(this_present_values && that_present_values))
+        return false;
+      if (!this.values.equals(that.values))
+        return false;
+    }
+
+    boolean this_present_createTime = true;
+    boolean that_present_createTime = true;
+    if (this_present_createTime || that_present_createTime) {
+      if (!(this_present_createTime && that_present_createTime))
+        return false;
+      if (this.createTime != that.createTime)
+        return false;
+    }
+
+    boolean this_present_lastAccessTime = true;
+    boolean that_present_lastAccessTime = true;
+    if (this_present_lastAccessTime || that_present_lastAccessTime) {
+      if (!(this_present_lastAccessTime && that_present_lastAccessTime))
+        return false;
+      if (this.lastAccessTime != that.lastAccessTime)
+        return false;
+    }
+
+    boolean this_present_relativePath = true && this.isSetRelativePath();
+    boolean that_present_relativePath = true && that.isSetRelativePath();
+    if (this_present_relativePath || that_present_relativePath) {
+      if (!(this_present_relativePath && that_present_relativePath))
+        return false;
+      if (!this.relativePath.equals(that.relativePath))
+        return false;
+    }
+
+    boolean this_present_parameters = true && this.isSetParameters();
+    boolean that_present_parameters = true && that.isSetParameters();
+    if (this_present_parameters || that_present_parameters) {
+      if (!(this_present_parameters && that_present_parameters))
+        return false;
+      if (!this.parameters.equals(that.parameters))
+        return false;
+    }
+
+    boolean this_present_privileges = true && this.isSetPrivileges();
+    boolean that_present_privileges = true && that.isSetPrivileges();
+    if (this_present_privileges || that_present_privileges) {
+      if (!(this_present_privileges && that_present_privileges))
+        return false;
+      if (!this.privileges.equals(that.privileges))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_values = true && (isSetValues());
+    list.add(present_values);
+    if (present_values)
+      list.add(values);
+
+    boolean present_createTime = true;
+    list.add(present_createTime);
+    if (present_createTime)
+      list.add(createTime);
+
+    boolean present_lastAccessTime = true;
+    list.add(present_lastAccessTime);
+    if (present_lastAccessTime)
+      list.add(lastAccessTime);
+
+    boolean present_relativePath = true && (isSetRelativePath());
+    list.add(present_relativePath);
+    if (present_relativePath)
+      list.add(relativePath);
+
+    boolean present_parameters = true && (isSetParameters());
+    list.add(present_parameters);
+    if (present_parameters)
+      list.add(parameters);
+
+    boolean present_privileges = true && (isSetPrivileges());
+    list.add(present_privileges);
+    if (present_privileges)
+      list.add(privileges);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionWithoutSD other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetValues()).compareTo(other.isSetValues());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetValues()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.values, other.values);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCreateTime()).compareTo(other.isSetCreateTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCreateTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.createTime, other.createTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetLastAccessTime()).compareTo(other.isSetLastAccessTime());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetLastAccessTime()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.lastAccessTime, other.lastAccessTime);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetRelativePath()).compareTo(other.isSetRelativePath());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetRelativePath()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.relativePath, other.relativePath);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetParameters()).compareTo(other.isSetParameters());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetParameters()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parameters, other.parameters);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetPrivileges()).compareTo(other.isSetPrivileges());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPrivileges()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.privileges, other.privileges);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionWithoutSD(");
+    boolean first = true;
+
+    sb.append("values:");
+    if (this.values == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.values);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("createTime:");
+    sb.append(this.createTime);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("lastAccessTime:");
+    sb.append(this.lastAccessTime);
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("relativePath:");
+    if (this.relativePath == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.relativePath);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("parameters:");
+    if (this.parameters == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.parameters);
+    }
+    first = false;
+    if (isSetPrivileges()) {
+      if (!first) sb.append(", ");
+      sb.append("privileges:");
+      if (this.privileges == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.privileges);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    // check for sub-struct validity
+    if (privileges != null) {
+      privileges.validate();
+    }
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionWithoutSDStandardSchemeFactory implements SchemeFactory {
+    public PartitionWithoutSDStandardScheme getScheme() {
+      return new PartitionWithoutSDStandardScheme();
+    }
+  }
+
+  private static class PartitionWithoutSDStandardScheme extends StandardScheme<PartitionWithoutSD> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionWithoutSD struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // VALUES
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list234 = iprot.readListBegin();
+                struct.values = new ArrayList<String>(_list234.size);
+                String _elem235;
+                for (int _i236 = 0; _i236 < _list234.size; ++_i236)
+                {
+                  _elem235 = iprot.readString();
+                  struct.values.add(_elem235);
+                }
+                iprot.readListEnd();
+              }
+              struct.setValuesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // CREATE_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.createTime = iprot.readI32();
+              struct.setCreateTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // LAST_ACCESS_TIME
+            if (schemeField.type == org.apache.thrift.protocol.TType.I32) {
+              struct.lastAccessTime = iprot.readI32();
+              struct.setLastAccessTimeIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // RELATIVE_PATH
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.relativePath = iprot.readString();
+              struct.setRelativePathIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // PARAMETERS
+            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
+              {
+                org.apache.thrift.protocol.TMap _map237 = iprot.readMapBegin();
+                struct.parameters = new HashMap<String,String>(2*_map237.size);
+                String _key238;
+                String _val239;
+                for (int _i240 = 0; _i240 < _map237.size; ++_i240)
+                {
+                  _key238 = iprot.readString();
+                  _val239 = iprot.readString();
+                  struct.parameters.put(_key238, _val239);
+                }
+                iprot.readMapEnd();
+              }
+              struct.setParametersIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // PRIVILEGES
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) {
+              struct.privileges = new PrincipalPrivilegeSet();
+              struct.privileges.read(iprot);
+              struct.setPrivilegesIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionWithoutSD struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.values != null) {
+        oprot.writeFieldBegin(VALUES_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size()));
+          for (String _iter241 : struct.values)
+          {
+            oprot.writeString(_iter241);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(CREATE_TIME_FIELD_DESC);
+      oprot.writeI32(struct.createTime);
+      oprot.writeFieldEnd();
+      oprot.writeFieldBegin(LAST_ACCESS_TIME_FIELD_DESC);
+      oprot.writeI32(struct.lastAccessTime);
+      oprot.writeFieldEnd();
+      if (struct.relativePath != null) {
+        oprot.writeFieldBegin(RELATIVE_PATH_FIELD_DESC);
+        oprot.writeString(struct.relativePath);
+        oprot.writeFieldEnd();
+      }
+      if (struct.parameters != null) {
+        oprot.writeFieldBegin(PARAMETERS_FIELD_DESC);
+        {
+          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size()));
+          for (Map.Entry<String, String> _iter242 : struct.parameters.entrySet())
+          {
+            oprot.writeString(_iter242.getKey());
+            oprot.writeString(_iter242.getValue());
+          }
+          oprot.writeMapEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      if (struct.privileges != null) {
+        if (struct.isSetPrivileges()) {
+          oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC);
+          struct.privileges.write(oprot);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionWithoutSDTupleSchemeFactory implements SchemeFactory {
+    public PartitionWithoutSDTupleScheme getScheme() {
+      return new PartitionWithoutSDTupleScheme();
+    }
+  }
+
+  private static class PartitionWithoutSDTupleScheme extends TupleScheme<PartitionWithoutSD> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetValues()) {
+        optionals.set(0);
+      }
+      if (struct.isSetCreateTime()) {
+        optionals.set(1);
+      }
+      if (struct.isSetLastAccessTime()) {
+        optionals.set(2);
+      }
+      if (struct.isSetRelativePath()) {
+        optionals.set(3);
+      }
+      if (struct.isSetParameters()) {
+        optionals.set(4);
+      }
+      if (struct.isSetPrivileges()) {
+        optionals.set(5);
+      }
+      oprot.writeBitSet(optionals, 6);
+      if (struct.isSetValues()) {
+        {
+          oprot.writeI32(struct.values.size());
+          for (String _iter243 : struct.values)
+          {
+            oprot.writeString(_iter243);
+          }
+        }
+      }
+      if (struct.isSetCreateTime()) {
+        oprot.writeI32(struct.createTime);
+      }
+      if (struct.isSetLastAccessTime()) {
+        oprot.writeI32(struct.lastAccessTime);
+      }
+      if (struct.isSetRelativePath()) {
+        oprot.writeString(struct.relativePath);
+      }
+      if (struct.isSetParameters()) {
+        {
+          oprot.writeI32(struct.parameters.size());
+          for (Map.Entry<String, String> _iter244 : struct.parameters.entrySet())
+          {
+            oprot.writeString(_iter244.getKey());
+            oprot.writeString(_iter244.getValue());
+          }
+        }
+      }
+      if (struct.isSetPrivileges()) {
+        struct.privileges.write(oprot);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(6);
+      if (incoming.get(0)) {
+        {
+          org.apache.thrift.protocol.TList _list245 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.values = new ArrayList<String>(_list245.size);
+          String _elem246;
+          for (int _i247 = 0; _i247 < _list245.size; ++_i247)
+          {
+            _elem246 = iprot.readString();
+            struct.values.add(_elem246);
+          }
+        }
+        struct.setValuesIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.createTime = iprot.readI32();
+        struct.setCreateTimeIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.lastAccessTime = iprot.readI32();
+        struct.setLastAccessTimeIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.relativePath = iprot.readString();
+        struct.setRelativePathIsSet(true);
+      }
+      if (incoming.get(4)) {
+        {
+          org.apache.thrift.protocol.TMap _map248 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.parameters = new HashMap<String,String>(2*_map248.size);
+          String _key249;
+          String _val250;
+          for (int _i251 = 0; _i251 < _map248.size; ++_i251)
+          {
+            _key249 = iprot.readString();
+            _val250 = iprot.readString();
+            struct.parameters.put(_key249, _val250);
+          }
+        }
+        struct.setParametersIsSet(true);
+      }
+      if (incoming.get(5)) {
+        struct.privileges = new PrincipalPrivilegeSet();
+        struct.privileges.read(iprot);
+        struct.setPrivilegesIsSet(true);
+      }
+    }
+  }
+
+}
+
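
Before the next file, a short usage sketch of the struct added above, assuming only the generated API shown (illustrative, not part of the diff). PartitionWithoutSD carries partition values, create/access times, a relative path, and parameters, but no StorageDescriptor; privileges is its only optional field and can stay unset:

  import java.util.Arrays;
  import java.util.HashMap;
  import java.util.Map;

  import org.apache.hadoop.hive.metastore.api.PartitionWithoutSD;

  public class PartitionWithoutSDSketch {
    public static void main(String[] args) throws Exception {
      Map<String, String> params = new HashMap<>();
      params.put("transient_lastDdlTime", "1531440000");

      // The all-args constructor covers the five default-requiredness fields;
      // the optional privileges field is simply left unset.
      PartitionWithoutSD p = new PartitionWithoutSD(
          Arrays.asList("2018", "07"),  // values: one entry per partition key
          1531440000,                   // createTime (epoch seconds, i32)
          0,                            // lastAccessTime
          "year=2018/month=07",         // relativePath instead of a full SD location
          params);

      p.validate();                             // only checks the privileges sub-struct, a no-op here
      System.out.println(p.isSetPrivileges());  // false
      System.out.println(p);                    // generated toString()
    }
  }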

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
new file mode 100644
index 0000000..0e72625
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprRequest.java
@@ -0,0 +1,921 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionsByExprRequest implements org.apache.thrift.TBase<PartitionsByExprRequest, PartitionsByExprRequest._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsByExprRequest> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsByExprRequest");
+
+  private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("dbName", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tblName", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField EXPR_FIELD_DESC = new org.apache.thrift.protocol.TField("expr", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField DEFAULT_PARTITION_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("defaultPartitionName", org.apache.thrift.protocol.TType.STRING, (short)4);
+  private static final org.apache.thrift.protocol.TField MAX_PARTS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxParts", org.apache.thrift.protocol.TType.I16, (short)5);
+  private static final org.apache.thrift.protocol.TField CAT_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catName", org.apache.thrift.protocol.TType.STRING, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionsByExprRequestStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionsByExprRequestTupleSchemeFactory());
+  }
+
+  private String dbName; // required
+  private String tblName; // required
+  private ByteBuffer expr; // required
+  private String defaultPartitionName; // optional
+  private short maxParts; // optional
+  private String catName; // optional
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    DB_NAME((short)1, "dbName"),
+    TBL_NAME((short)2, "tblName"),
+    EXPR((short)3, "expr"),
+    DEFAULT_PARTITION_NAME((short)4, "defaultPartitionName"),
+    MAX_PARTS((short)5, "maxParts"),
+    CAT_NAME((short)6, "catName");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // DB_NAME
+          return DB_NAME;
+        case 2: // TBL_NAME
+          return TBL_NAME;
+        case 3: // EXPR
+          return EXPR;
+        case 4: // DEFAULT_PARTITION_NAME
+          return DEFAULT_PARTITION_NAME;
+        case 5: // MAX_PARTS
+          return MAX_PARTS;
+        case 6: // CAT_NAME
+          return CAT_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __MAXPARTS_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  private static final _Fields optionals[] = {_Fields.DEFAULT_PARTITION_NAME,_Fields.MAX_PARTS,_Fields.CAT_NAME};
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("dbName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tblName", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.EXPR, new org.apache.thrift.meta_data.FieldMetaData("expr", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING        , true)));
+    tmpMap.put(_Fields.DEFAULT_PARTITION_NAME, new org.apache.thrift.meta_data.FieldMetaData("defaultPartitionName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    tmpMap.put(_Fields.MAX_PARTS, new org.apache.thrift.meta_data.FieldMetaData("maxParts", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16)));
+    tmpMap.put(_Fields.CAT_NAME, new org.apache.thrift.meta_data.FieldMetaData("catName", org.apache.thrift.TFieldRequirementType.OPTIONAL, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsByExprRequest.class, metaDataMap);
+  }
+
+  public PartitionsByExprRequest() {
+    this.maxParts = (short)-1;
+
+  }
+
+  public PartitionsByExprRequest(
+    String dbName,
+    String tblName,
+    ByteBuffer expr)
+  {
+    this();
+    this.dbName = dbName;
+    this.tblName = tblName;
+    this.expr = org.apache.thrift.TBaseHelper.copyBinary(expr);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionsByExprRequest(PartitionsByExprRequest other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetDbName()) {
+      this.dbName = other.dbName;
+    }
+    if (other.isSetTblName()) {
+      this.tblName = other.tblName;
+    }
+    if (other.isSetExpr()) {
+      this.expr = org.apache.thrift.TBaseHelper.copyBinary(other.expr);
+    }
+    if (other.isSetDefaultPartitionName()) {
+      this.defaultPartitionName = other.defaultPartitionName;
+    }
+    this.maxParts = other.maxParts;
+    if (other.isSetCatName()) {
+      this.catName = other.catName;
+    }
+  }
+
+  public PartitionsByExprRequest deepCopy() {
+    return new PartitionsByExprRequest(this);
+  }
+
+  @Override
+  public void clear() {
+    this.dbName = null;
+    this.tblName = null;
+    this.expr = null;
+    this.defaultPartitionName = null;
+    this.maxParts = (short)-1;
+
+    this.catName = null;
+  }
+
+  public String getDbName() {
+    return this.dbName;
+  }
+
+  public void setDbName(String dbName) {
+    this.dbName = dbName;
+  }
+
+  public void unsetDbName() {
+    this.dbName = null;
+  }
+
+  /** Returns true if field dbName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDbName() {
+    return this.dbName != null;
+  }
+
+  public void setDbNameIsSet(boolean value) {
+    if (!value) {
+      this.dbName = null;
+    }
+  }
+
+  public String getTblName() {
+    return this.tblName;
+  }
+
+  public void setTblName(String tblName) {
+    this.tblName = tblName;
+  }
+
+  public void unsetTblName() {
+    this.tblName = null;
+  }
+
+  /** Returns true if field tblName is set (has been assigned a value) and false otherwise */
+  public boolean isSetTblName() {
+    return this.tblName != null;
+  }
+
+  public void setTblNameIsSet(boolean value) {
+    if (!value) {
+      this.tblName = null;
+    }
+  }
+
+  public byte[] getExpr() {
+    setExpr(org.apache.thrift.TBaseHelper.rightSize(expr));
+    return expr == null ? null : expr.array();
+  }
+
+  public ByteBuffer bufferForExpr() {
+    return org.apache.thrift.TBaseHelper.copyBinary(expr);
+  }
+
+  public void setExpr(byte[] expr) {
+    this.expr = expr == null ? (ByteBuffer)null : ByteBuffer.wrap(Arrays.copyOf(expr, expr.length));
+  }
+
+  public void setExpr(ByteBuffer expr) {
+    this.expr = org.apache.thrift.TBaseHelper.copyBinary(expr);
+  }
+
+  public void unsetExpr() {
+    this.expr = null;
+  }
+
+  /** Returns true if field expr is set (has been assigned a value) and false otherwise */
+  public boolean isSetExpr() {
+    return this.expr != null;
+  }
+
+  public void setExprIsSet(boolean value) {
+    if (!value) {
+      this.expr = null;
+    }
+  }
+
+  public String getDefaultPartitionName() {
+    return this.defaultPartitionName;
+  }
+
+  public void setDefaultPartitionName(String defaultPartitionName) {
+    this.defaultPartitionName = defaultPartitionName;
+  }
+
+  public void unsetDefaultPartitionName() {
+    this.defaultPartitionName = null;
+  }
+
+  /** Returns true if field defaultPartitionName is set (has been assigned a value) and false otherwise */
+  public boolean isSetDefaultPartitionName() {
+    return this.defaultPartitionName != null;
+  }
+
+  public void setDefaultPartitionNameIsSet(boolean value) {
+    if (!value) {
+      this.defaultPartitionName = null;
+    }
+  }
+
+  public short getMaxParts() {
+    return this.maxParts;
+  }
+
+  public void setMaxParts(short maxParts) {
+    this.maxParts = maxParts;
+    setMaxPartsIsSet(true);
+  }
+
+  public void unsetMaxParts() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXPARTS_ISSET_ID);
+  }
+
+  /** Returns true if field maxParts is set (has been assigned a value) and false otherwise */
+  public boolean isSetMaxParts() {
+    return EncodingUtils.testBit(__isset_bitfield, __MAXPARTS_ISSET_ID);
+  }
+
+  public void setMaxPartsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXPARTS_ISSET_ID, value);
+  }
+
+  public String getCatName() {
+    return this.catName;
+  }
+
+  public void setCatName(String catName) {
+    this.catName = catName;
+  }
+
+  public void unsetCatName() {
+    this.catName = null;
+  }
+
+  /** Returns true if field catName is set (has been assigned a value) and false otherwise */
+  public boolean isSetCatName() {
+    return this.catName != null;
+  }
+
+  public void setCatNameIsSet(boolean value) {
+    if (!value) {
+      this.catName = null;
+    }
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case DB_NAME:
+      if (value == null) {
+        unsetDbName();
+      } else {
+        setDbName((String)value);
+      }
+      break;
+
+    case TBL_NAME:
+      if (value == null) {
+        unsetTblName();
+      } else {
+        setTblName((String)value);
+      }
+      break;
+
+    case EXPR:
+      if (value == null) {
+        unsetExpr();
+      } else {
+        setExpr((ByteBuffer)value);
+      }
+      break;
+
+    case DEFAULT_PARTITION_NAME:
+      if (value == null) {
+        unsetDefaultPartitionName();
+      } else {
+        setDefaultPartitionName((String)value);
+      }
+      break;
+
+    case MAX_PARTS:
+      if (value == null) {
+        unsetMaxParts();
+      } else {
+        setMaxParts((Short)value);
+      }
+      break;
+
+    case CAT_NAME:
+      if (value == null) {
+        unsetCatName();
+      } else {
+        setCatName((String)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case DB_NAME:
+      return getDbName();
+
+    case TBL_NAME:
+      return getTblName();
+
+    case EXPR:
+      return getExpr();
+
+    case DEFAULT_PARTITION_NAME:
+      return getDefaultPartitionName();
+
+    case MAX_PARTS:
+      return getMaxParts();
+
+    case CAT_NAME:
+      return getCatName();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case DB_NAME:
+      return isSetDbName();
+    case TBL_NAME:
+      return isSetTblName();
+    case EXPR:
+      return isSetExpr();
+    case DEFAULT_PARTITION_NAME:
+      return isSetDefaultPartitionName();
+    case MAX_PARTS:
+      return isSetMaxParts();
+    case CAT_NAME:
+      return isSetCatName();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionsByExprRequest)
+      return this.equals((PartitionsByExprRequest)that);
+    return false;
+  }
+
+  public boolean equals(PartitionsByExprRequest that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_dbName = true && this.isSetDbName();
+    boolean that_present_dbName = true && that.isSetDbName();
+    if (this_present_dbName || that_present_dbName) {
+      if (!(this_present_dbName && that_present_dbName))
+        return false;
+      if (!this.dbName.equals(that.dbName))
+        return false;
+    }
+
+    boolean this_present_tblName = true && this.isSetTblName();
+    boolean that_present_tblName = true && that.isSetTblName();
+    if (this_present_tblName || that_present_tblName) {
+      if (!(this_present_tblName && that_present_tblName))
+        return false;
+      if (!this.tblName.equals(that.tblName))
+        return false;
+    }
+
+    boolean this_present_expr = true && this.isSetExpr();
+    boolean that_present_expr = true && that.isSetExpr();
+    if (this_present_expr || that_present_expr) {
+      if (!(this_present_expr && that_present_expr))
+        return false;
+      if (!this.expr.equals(that.expr))
+        return false;
+    }
+
+    boolean this_present_defaultPartitionName = true && this.isSetDefaultPartitionName();
+    boolean that_present_defaultPartitionName = true && that.isSetDefaultPartitionName();
+    if (this_present_defaultPartitionName || that_present_defaultPartitionName) {
+      if (!(this_present_defaultPartitionName && that_present_defaultPartitionName))
+        return false;
+      if (!this.defaultPartitionName.equals(that.defaultPartitionName))
+        return false;
+    }
+
+    boolean this_present_maxParts = true && this.isSetMaxParts();
+    boolean that_present_maxParts = true && that.isSetMaxParts();
+    if (this_present_maxParts || that_present_maxParts) {
+      if (!(this_present_maxParts && that_present_maxParts))
+        return false;
+      if (this.maxParts != that.maxParts)
+        return false;
+    }
+
+    boolean this_present_catName = true && this.isSetCatName();
+    boolean that_present_catName = true && that.isSetCatName();
+    if (this_present_catName || that_present_catName) {
+      if (!(this_present_catName && that_present_catName))
+        return false;
+      if (!this.catName.equals(that.catName))
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_dbName = true && (isSetDbName());
+    list.add(present_dbName);
+    if (present_dbName)
+      list.add(dbName);
+
+    boolean present_tblName = true && (isSetTblName());
+    list.add(present_tblName);
+    if (present_tblName)
+      list.add(tblName);
+
+    boolean present_expr = true && (isSetExpr());
+    list.add(present_expr);
+    if (present_expr)
+      list.add(expr);
+
+    boolean present_defaultPartitionName = true && (isSetDefaultPartitionName());
+    list.add(present_defaultPartitionName);
+    if (present_defaultPartitionName)
+      list.add(defaultPartitionName);
+
+    boolean present_maxParts = true && (isSetMaxParts());
+    list.add(present_maxParts);
+    if (present_maxParts)
+      list.add(maxParts);
+
+    boolean present_catName = true && (isSetCatName());
+    list.add(present_catName);
+    if (present_catName)
+      list.add(catName);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionsByExprRequest other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetDbName()).compareTo(other.isSetDbName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDbName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.dbName, other.dbName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetTblName()).compareTo(other.isSetTblName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetTblName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tblName, other.tblName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetExpr()).compareTo(other.isSetExpr());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetExpr()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.expr, other.expr);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetDefaultPartitionName()).compareTo(other.isSetDefaultPartitionName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetDefaultPartitionName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.defaultPartitionName, other.defaultPartitionName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetMaxParts()).compareTo(other.isSetMaxParts());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetMaxParts()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxParts, other.maxParts);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetCatName()).compareTo(other.isSetCatName());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetCatName()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catName, other.catName);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionsByExprRequest(");
+    boolean first = true;
+
+    sb.append("dbName:");
+    if (this.dbName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.dbName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("tblName:");
+    if (this.tblName == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.tblName);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("expr:");
+    if (this.expr == null) {
+      sb.append("null");
+    } else {
+      org.apache.thrift.TBaseHelper.toString(this.expr, sb);
+    }
+    first = false;
+    if (isSetDefaultPartitionName()) {
+      if (!first) sb.append(", ");
+      sb.append("defaultPartitionName:");
+      if (this.defaultPartitionName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.defaultPartitionName);
+      }
+      first = false;
+    }
+    if (isSetMaxParts()) {
+      if (!first) sb.append(", ");
+      sb.append("maxParts:");
+      sb.append(this.maxParts);
+      first = false;
+    }
+    if (isSetCatName()) {
+      if (!first) sb.append(", ");
+      sb.append("catName:");
+      if (this.catName == null) {
+        sb.append("null");
+      } else {
+        sb.append(this.catName);
+      }
+      first = false;
+    }
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetDbName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'dbName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetTblName()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'tblName' is unset! Struct:" + toString());
+    }
+
+    if (!isSetExpr()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'expr' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionsByExprRequestStandardSchemeFactory implements SchemeFactory {
+    public PartitionsByExprRequestStandardScheme getScheme() {
+      return new PartitionsByExprRequestStandardScheme();
+    }
+  }
+
+  private static class PartitionsByExprRequestStandardScheme extends StandardScheme<PartitionsByExprRequest> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprRequest struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // DB_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.dbName = iprot.readString();
+              struct.setDbNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // TBL_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.tblName = iprot.readString();
+              struct.setTblNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 3: // EXPR
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.expr = iprot.readBinary();
+              struct.setExprIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 4: // DEFAULT_PARTITION_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.defaultPartitionName = iprot.readString();
+              struct.setDefaultPartitionNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 5: // MAX_PARTS
+            if (schemeField.type == org.apache.thrift.protocol.TType.I16) {
+              struct.maxParts = iprot.readI16();
+              struct.setMaxPartsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 6: // CAT_NAME
+            if (schemeField.type == org.apache.thrift.protocol.TType.STRING) {
+              struct.catName = iprot.readString();
+              struct.setCatNameIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRequest struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.dbName != null) {
+        oprot.writeFieldBegin(DB_NAME_FIELD_DESC);
+        oprot.writeString(struct.dbName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.tblName != null) {
+        oprot.writeFieldBegin(TBL_NAME_FIELD_DESC);
+        oprot.writeString(struct.tblName);
+        oprot.writeFieldEnd();
+      }
+      if (struct.expr != null) {
+        oprot.writeFieldBegin(EXPR_FIELD_DESC);
+        oprot.writeBinary(struct.expr);
+        oprot.writeFieldEnd();
+      }
+      if (struct.defaultPartitionName != null) {
+        if (struct.isSetDefaultPartitionName()) {
+          oprot.writeFieldBegin(DEFAULT_PARTITION_NAME_FIELD_DESC);
+          oprot.writeString(struct.defaultPartitionName);
+          oprot.writeFieldEnd();
+        }
+      }
+      if (struct.isSetMaxParts()) {
+        oprot.writeFieldBegin(MAX_PARTS_FIELD_DESC);
+        oprot.writeI16(struct.maxParts);
+        oprot.writeFieldEnd();
+      }
+      if (struct.catName != null) {
+        if (struct.isSetCatName()) {
+          oprot.writeFieldBegin(CAT_NAME_FIELD_DESC);
+          oprot.writeString(struct.catName);
+          oprot.writeFieldEnd();
+        }
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionsByExprRequestTupleSchemeFactory implements SchemeFactory {
+    public PartitionsByExprRequestTupleScheme getScheme() {
+      return new PartitionsByExprRequestTupleScheme();
+    }
+  }
+
+  private static class PartitionsByExprRequestTupleScheme extends TupleScheme<PartitionsByExprRequest> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      oprot.writeString(struct.dbName);
+      oprot.writeString(struct.tblName);
+      oprot.writeBinary(struct.expr);
+      BitSet optionals = new BitSet();
+      if (struct.isSetDefaultPartitionName()) {
+        optionals.set(0);
+      }
+      if (struct.isSetMaxParts()) {
+        optionals.set(1);
+      }
+      if (struct.isSetCatName()) {
+        optionals.set(2);
+      }
+      oprot.writeBitSet(optionals, 3);
+      if (struct.isSetDefaultPartitionName()) {
+        oprot.writeString(struct.defaultPartitionName);
+      }
+      if (struct.isSetMaxParts()) {
+        oprot.writeI16(struct.maxParts);
+      }
+      if (struct.isSetCatName()) {
+        oprot.writeString(struct.catName);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRequest struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      struct.dbName = iprot.readString();
+      struct.setDbNameIsSet(true);
+      struct.tblName = iprot.readString();
+      struct.setTblNameIsSet(true);
+      struct.expr = iprot.readBinary();
+      struct.setExprIsSet(true);
+      BitSet incoming = iprot.readBitSet(3);
+      if (incoming.get(0)) {
+        struct.defaultPartitionName = iprot.readString();
+        struct.setDefaultPartitionNameIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.maxParts = iprot.readI16();
+        struct.setMaxPartsIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.catName = iprot.readString();
+        struct.setCatNameIsSet(true);
+      }
+    }
+  }
+
+}
+

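[Editor's note] For orientation, a minimal sketch of how a caller might populate the generated PartitionsByExprRequest above. This is an illustration, not code from the commit: the three-argument constructor follows the Thrift compiler's usual pattern of taking only the required fields (dbName, tblName, expr), and the database name, table name, and exprBytes argument are hypothetical placeholders; producing a real serialized filter expression needs Hive's expression utilities, which are outside this diff.

  import java.nio.ByteBuffer;

  import org.apache.thrift.TException;

  import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest;

  public final class PartitionsByExprRequestSketch {
    // exprBytes stands in for a serialized partition-filter expression (hypothetical input).
    public static PartitionsByExprRequest build(byte[] exprBytes) throws TException {
      // dbName, tblName and expr are the struct's required fields; validate() below
      // throws a TProtocolException if any of them is left unset.
      PartitionsByExprRequest req = new PartitionsByExprRequest(
          "default", "web_logs", ByteBuffer.wrap(exprBytes));
      // The remaining fields are optional and are only written when explicitly set
      // (see the isSet* guards in the standard and tuple schemes above).
      req.setDefaultPartitionName("__HIVE_DEFAULT_PARTITION__");
      req.setMaxParts((short) 100); // maxParts is an i16, hence the short cast
      req.setCatName("hive");
      req.validate();
      return req;
    }
  }

Serialization then goes through write(TProtocol), which dispatches to whichever scheme (standard or tuple) is registered for the protocol in the schemes map.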
http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
new file mode 100644
index 0000000..3f2ddcc
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
@@ -0,0 +1,542 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+package org.apache.hadoop.hive.metastore.api;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+@org.apache.hadoop.classification.InterfaceAudience.Public @org.apache.hadoop.classification.InterfaceStability.Stable public class PartitionsByExprResult implements org.apache.thrift.TBase<PartitionsByExprResult, PartitionsByExprResult._Fields>, java.io.Serializable, Cloneable, Comparable<PartitionsByExprResult> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("PartitionsByExprResult");
+
+  private static final org.apache.thrift.protocol.TField PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("partitions", org.apache.thrift.protocol.TType.LIST, (short)1);
+  private static final org.apache.thrift.protocol.TField HAS_UNKNOWN_PARTITIONS_FIELD_DESC = new org.apache.thrift.protocol.TField("hasUnknownPartitions", org.apache.thrift.protocol.TType.BOOL, (short)2);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new PartitionsByExprResultStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new PartitionsByExprResultTupleSchemeFactory());
+  }
+
+  private List<Partition> partitions; // required
+  private boolean hasUnknownPartitions; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    PARTITIONS((short)1, "partitions"),
+    HAS_UNKNOWN_PARTITIONS((short)2, "hasUnknownPartitions");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if it's not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // PARTITIONS
+          return PARTITIONS;
+        case 2: // HAS_UNKNOWN_PARTITIONS
+          return HAS_UNKNOWN_PARTITIONS;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if it's not found.
+     */
+    public static _Fields findByName(String name) {
+      return byName.get(name);
+    }
+
+    private final short _thriftId;
+    private final String _fieldName;
+
+    _Fields(short thriftId, String fieldName) {
+      _thriftId = thriftId;
+      _fieldName = fieldName;
+    }
+
+    public short getThriftFieldId() {
+      return _thriftId;
+    }
+
+    public String getFieldName() {
+      return _fieldName;
+    }
+  }
+
+  // isset id assignments
+  private static final int __HASUNKNOWNPARTITIONS_ISSET_ID = 0;
+  private byte __isset_bitfield = 0;
+  public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+  static {
+    Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+    tmpMap.put(_Fields.PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("partitions", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, 
+            new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Partition.class))));
+    tmpMap.put(_Fields.HAS_UNKNOWN_PARTITIONS, new org.apache.thrift.meta_data.FieldMetaData("hasUnknownPartitions", org.apache.thrift.TFieldRequirementType.REQUIRED, 
+        new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL)));
+    metaDataMap = Collections.unmodifiableMap(tmpMap);
+    org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(PartitionsByExprResult.class, metaDataMap);
+  }
+
+  public PartitionsByExprResult() {
+  }
+
+  public PartitionsByExprResult(
+    List<Partition> partitions,
+    boolean hasUnknownPartitions)
+  {
+    this();
+    this.partitions = partitions;
+    this.hasUnknownPartitions = hasUnknownPartitions;
+    setHasUnknownPartitionsIsSet(true);
+  }
+
+  /**
+   * Performs a deep copy on <i>other</i>.
+   */
+  public PartitionsByExprResult(PartitionsByExprResult other) {
+    __isset_bitfield = other.__isset_bitfield;
+    if (other.isSetPartitions()) {
+      List<Partition> __this__partitions = new ArrayList<Partition>(other.partitions.size());
+      for (Partition other_element : other.partitions) {
+        __this__partitions.add(new Partition(other_element));
+      }
+      this.partitions = __this__partitions;
+    }
+    this.hasUnknownPartitions = other.hasUnknownPartitions;
+  }
+
+  public PartitionsByExprResult deepCopy() {
+    return new PartitionsByExprResult(this);
+  }
+
+  @Override
+  public void clear() {
+    this.partitions = null;
+    setHasUnknownPartitionsIsSet(false);
+    this.hasUnknownPartitions = false;
+  }
+
+  public int getPartitionsSize() {
+    return (this.partitions == null) ? 0 : this.partitions.size();
+  }
+
+  public java.util.Iterator<Partition> getPartitionsIterator() {
+    return (this.partitions == null) ? null : this.partitions.iterator();
+  }
+
+  public void addToPartitions(Partition elem) {
+    if (this.partitions == null) {
+      this.partitions = new ArrayList<Partition>();
+    }
+    this.partitions.add(elem);
+  }
+
+  public List<Partition> getPartitions() {
+    return this.partitions;
+  }
+
+  public void setPartitions(List<Partition> partitions) {
+    this.partitions = partitions;
+  }
+
+  public void unsetPartitions() {
+    this.partitions = null;
+  }
+
+  /** Returns true if field partitions is set (has been assigned a value) and false otherwise */
+  public boolean isSetPartitions() {
+    return this.partitions != null;
+  }
+
+  public void setPartitionsIsSet(boolean value) {
+    if (!value) {
+      this.partitions = null;
+    }
+  }
+
+  public boolean isHasUnknownPartitions() {
+    return this.hasUnknownPartitions;
+  }
+
+  public void setHasUnknownPartitions(boolean hasUnknownPartitions) {
+    this.hasUnknownPartitions = hasUnknownPartitions;
+    setHasUnknownPartitionsIsSet(true);
+  }
+
+  public void unsetHasUnknownPartitions() {
+    __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __HASUNKNOWNPARTITIONS_ISSET_ID);
+  }
+
+  /** Returns true if field hasUnknownPartitions is set (has been assigned a value) and false otherwise */
+  public boolean isSetHasUnknownPartitions() {
+    return EncodingUtils.testBit(__isset_bitfield, __HASUNKNOWNPARTITIONS_ISSET_ID);
+  }
+
+  public void setHasUnknownPartitionsIsSet(boolean value) {
+    __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __HASUNKNOWNPARTITIONS_ISSET_ID, value);
+  }
+
+  public void setFieldValue(_Fields field, Object value) {
+    switch (field) {
+    case PARTITIONS:
+      if (value == null) {
+        unsetPartitions();
+      } else {
+        setPartitions((List<Partition>)value);
+      }
+      break;
+
+    case HAS_UNKNOWN_PARTITIONS:
+      if (value == null) {
+        unsetHasUnknownPartitions();
+      } else {
+        setHasUnknownPartitions((Boolean)value);
+      }
+      break;
+
+    }
+  }
+
+  public Object getFieldValue(_Fields field) {
+    switch (field) {
+    case PARTITIONS:
+      return getPartitions();
+
+    case HAS_UNKNOWN_PARTITIONS:
+      return isHasUnknownPartitions();
+
+    }
+    throw new IllegalStateException();
+  }
+
+  /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+  public boolean isSet(_Fields field) {
+    if (field == null) {
+      throw new IllegalArgumentException();
+    }
+
+    switch (field) {
+    case PARTITIONS:
+      return isSetPartitions();
+    case HAS_UNKNOWN_PARTITIONS:
+      return isSetHasUnknownPartitions();
+    }
+    throw new IllegalStateException();
+  }
+
+  @Override
+  public boolean equals(Object that) {
+    if (that == null)
+      return false;
+    if (that instanceof PartitionsByExprResult)
+      return this.equals((PartitionsByExprResult)that);
+    return false;
+  }
+
+  public boolean equals(PartitionsByExprResult that) {
+    if (that == null)
+      return false;
+
+    boolean this_present_partitions = true && this.isSetPartitions();
+    boolean that_present_partitions = true && that.isSetPartitions();
+    if (this_present_partitions || that_present_partitions) {
+      if (!(this_present_partitions && that_present_partitions))
+        return false;
+      if (!this.partitions.equals(that.partitions))
+        return false;
+    }
+
+    boolean this_present_hasUnknownPartitions = true;
+    boolean that_present_hasUnknownPartitions = true;
+    if (this_present_hasUnknownPartitions || that_present_hasUnknownPartitions) {
+      if (!(this_present_hasUnknownPartitions && that_present_hasUnknownPartitions))
+        return false;
+      if (this.hasUnknownPartitions != that.hasUnknownPartitions)
+        return false;
+    }
+
+    return true;
+  }
+
+  @Override
+  public int hashCode() {
+    List<Object> list = new ArrayList<Object>();
+
+    boolean present_partitions = true && (isSetPartitions());
+    list.add(present_partitions);
+    if (present_partitions)
+      list.add(partitions);
+
+    boolean present_hasUnknownPartitions = true;
+    list.add(present_hasUnknownPartitions);
+    if (present_hasUnknownPartitions)
+      list.add(hasUnknownPartitions);
+
+    return list.hashCode();
+  }
+
+  @Override
+  public int compareTo(PartitionsByExprResult other) {
+    if (!getClass().equals(other.getClass())) {
+      return getClass().getName().compareTo(other.getClass().getName());
+    }
+
+    int lastComparison = 0;
+
+    lastComparison = Boolean.valueOf(isSetPartitions()).compareTo(other.isSetPartitions());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetPartitions()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.partitions, other.partitions);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    lastComparison = Boolean.valueOf(isSetHasUnknownPartitions()).compareTo(other.isSetHasUnknownPartitions());
+    if (lastComparison != 0) {
+      return lastComparison;
+    }
+    if (isSetHasUnknownPartitions()) {
+      lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.hasUnknownPartitions, other.hasUnknownPartitions);
+      if (lastComparison != 0) {
+        return lastComparison;
+      }
+    }
+    return 0;
+  }
+
+  public _Fields fieldForId(int fieldId) {
+    return _Fields.findByThriftId(fieldId);
+  }
+
+  public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+    schemes.get(iprot.getScheme()).getScheme().read(iprot, this);
+  }
+
+  public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+    schemes.get(oprot.getScheme()).getScheme().write(oprot, this);
+  }
+
+  @Override
+  public String toString() {
+    StringBuilder sb = new StringBuilder("PartitionsByExprResult(");
+    boolean first = true;
+
+    sb.append("partitions:");
+    if (this.partitions == null) {
+      sb.append("null");
+    } else {
+      sb.append(this.partitions);
+    }
+    first = false;
+    if (!first) sb.append(", ");
+    sb.append("hasUnknownPartitions:");
+    sb.append(this.hasUnknownPartitions);
+    first = false;
+    sb.append(")");
+    return sb.toString();
+  }
+
+  public void validate() throws org.apache.thrift.TException {
+    // check for required fields
+    if (!isSetPartitions()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'partitions' is unset! Struct:" + toString());
+    }
+
+    if (!isSetHasUnknownPartitions()) {
+      throw new org.apache.thrift.protocol.TProtocolException("Required field 'hasUnknownPartitions' is unset! Struct:" + toString());
+    }
+
+    // check for sub-struct validity
+  }
+
+  private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+    try {
+      write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+    try {
+      // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor.
+      __isset_bitfield = 0;
+      read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+    } catch (org.apache.thrift.TException te) {
+      throw new java.io.IOException(te);
+    }
+  }
+
+  private static class PartitionsByExprResultStandardSchemeFactory implements SchemeFactory {
+    public PartitionsByExprResultStandardScheme getScheme() {
+      return new PartitionsByExprResultStandardScheme();
+    }
+  }
+
+  private static class PartitionsByExprResultStandardScheme extends StandardScheme<PartitionsByExprResult> {
+
+    public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprResult struct) throws org.apache.thrift.TException {
+      org.apache.thrift.protocol.TField schemeField;
+      iprot.readStructBegin();
+      while (true)
+      {
+        schemeField = iprot.readFieldBegin();
+        if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { 
+          break;
+        }
+        switch (schemeField.id) {
+          case 1: // PARTITIONS
+            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
+              {
+                org.apache.thrift.protocol.TList _list416 = iprot.readListBegin();
+                struct.partitions = new ArrayList<Partition>(_list416.size);
+                Partition _elem417;
+                for (int _i418 = 0; _i418 < _list416.size; ++_i418)
+                {
+                  _elem417 = new Partition();
+                  _elem417.read(iprot);
+                  struct.partitions.add(_elem417);
+                }
+                iprot.readListEnd();
+              }
+              struct.setPartitionsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          case 2: // HAS_UNKNOWN_PARTITIONS
+            if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) {
+              struct.hasUnknownPartitions = iprot.readBool();
+              struct.setHasUnknownPartitionsIsSet(true);
+            } else { 
+              org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+            }
+            break;
+          default:
+            org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type);
+        }
+        iprot.readFieldEnd();
+      }
+      iprot.readStructEnd();
+      struct.validate();
+    }
+
+    public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprResult struct) throws org.apache.thrift.TException {
+      struct.validate();
+
+      oprot.writeStructBegin(STRUCT_DESC);
+      if (struct.partitions != null) {
+        oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
+        {
+          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
+          for (Partition _iter419 : struct.partitions)
+          {
+            _iter419.write(oprot);
+          }
+          oprot.writeListEnd();
+        }
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(HAS_UNKNOWN_PARTITIONS_FIELD_DESC);
+      oprot.writeBool(struct.hasUnknownPartitions);
+      oprot.writeFieldEnd();
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class PartitionsByExprResultTupleSchemeFactory implements SchemeFactory {
+    public PartitionsByExprResultTupleScheme getScheme() {
+      return new PartitionsByExprResultTupleScheme();
+    }
+  }
+
+  private static class PartitionsByExprResultTupleScheme extends TupleScheme<PartitionsByExprResult> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      {
+        oprot.writeI32(struct.partitions.size());
+        for (Partition _iter420 : struct.partitions)
+        {
+          _iter420.write(oprot);
+        }
+      }
+      oprot.writeBool(struct.hasUnknownPartitions);
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprResult struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      {
+        org.apache.thrift.protocol.TList _list421 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.partitions = new ArrayList<Partition>(_list421.size);
+        Partition _elem422;
+        for (int _i423 = 0; _i423 < _list421.size; ++_i423)
+        {
+          _elem422 = new Partition();
+          _elem422.read(iprot);
+          struct.partitions.add(_elem422);
+        }
+      }
+      struct.setPartitionsIsSet(true);
+      struct.hasUnknownPartitions = iprot.readBool();
+      struct.setHasUnknownPartitionsIsSet(true);
+    }
+  }
+
+}
+

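[Editor's note] Continuing the illustration, a sketch (again not part of the commit) of how a client might consume the PartitionsByExprResult defined above. getPartitions() and isHasUnknownPartitions() are the accessors generated in this file; the interpretation of hasUnknownPartitions, that the metastore could not evaluate the expression exactly for every partition and the caller should re-filter, reflects how the flag is used elsewhere in Hive.

  import java.util.List;

  import org.apache.hadoop.hive.metastore.api.Partition;
  import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult;

  public final class PartitionsByExprResultSketch {
    public static List<Partition> extract(PartitionsByExprResult result) {
      // Both fields are required, so a deserialized, validated instance carries both.
      List<Partition> partitions = result.getPartitions();
      if (result.isHasUnknownPartitions()) {
        // The server-side match was inexact; the list may contain partitions that
        // do not actually satisfy the expression, so callers re-filter client-side.
        System.out.println("warning: result may include non-matching partitions");
      }
      return partitions;
    }
  }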

http://git-wip-us.apache.org/repos/asf/hive/blob/20eb7b51/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
----------------------------------------------------------------------
diff --git a/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
new file mode 100644
index 0000000..a816ae7
--- /dev/null
+++ b/standalone-metastore/metastore-common/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp
@@ -0,0 +1,94840 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ *  @generated
+ */
+#include "ThriftHiveMetastore.h"
+
+namespace Apache { namespace Hadoop { namespace Hive {
+
+
+ThriftHiveMetastore_getMetaConf_args::~ThriftHiveMetastore_getMetaConf_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_getMetaConf_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->key);
+          this->__isset.key = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_getMetaConf_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_getMetaConf_args");
+
+  xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->key);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_getMetaConf_pargs::~ThriftHiveMetastore_getMetaConf_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_getMetaConf_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_getMetaConf_pargs");
+
+  xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->key)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_getMetaConf_result::~ThriftHiveMetastore_getMetaConf_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_getMetaConf_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->success);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_getMetaConf_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_getMetaConf_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0);
+    xfer += oprot->writeString(this->success);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_getMetaConf_presult::~ThriftHiveMetastore_getMetaConf_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_getMetaConf_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString((*(this->success)));
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_setMetaConf_args::~ThriftHiveMetastore_setMetaConf_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_setMetaConf_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->key);
+          this->__isset.key = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->value);
+          this->__isset.value = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_setMetaConf_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_setMetaConf_args");
+
+  xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->key);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString(this->value);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_setMetaConf_pargs::~ThriftHiveMetastore_setMetaConf_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_setMetaConf_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_setMetaConf_pargs");
+
+  xfer += oprot->writeFieldBegin("key", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->key)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("value", ::apache::thrift::protocol::T_STRING, 2);
+  xfer += oprot->writeString((*(this->value)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_setMetaConf_result::~ThriftHiveMetastore_setMetaConf_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_setMetaConf_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_setMetaConf_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_setMetaConf_result");
+
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_setMetaConf_presult::~ThriftHiveMetastore_setMetaConf_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_setMetaConf_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_create_catalog_args::~ThriftHiveMetastore_create_catalog_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->catalog.read(iprot);
+          this->__isset.catalog = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_create_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_catalog_args");
+
+  xfer += oprot->writeFieldBegin("catalog", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->catalog.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_create_catalog_pargs::~ThriftHiveMetastore_create_catalog_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_catalog_pargs");
+
+  xfer += oprot->writeFieldBegin("catalog", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->catalog)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_create_catalog_result::~ThriftHiveMetastore_create_catalog_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_create_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_catalog_result");
+
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o3) {
+    xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->o3.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_create_catalog_presult::~ThriftHiveMetastore_create_catalog_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_catalog_args::~ThriftHiveMetastore_alter_catalog_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->rqst.read(iprot);
+          this->__isset.rqst = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_alter_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_catalog_args");
+
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->rqst.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_catalog_pargs::~ThriftHiveMetastore_alter_catalog_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_catalog_pargs");
+
+  xfer += oprot->writeFieldBegin("rqst", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->rqst)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_catalog_result::~ThriftHiveMetastore_alter_catalog_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_alter_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_catalog_result");
+
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o3) {
+    xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->o3.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_catalog_presult::~ThriftHiveMetastore_alter_catalog_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_catalog_args::~ThriftHiveMetastore_get_catalog_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->catName.read(iprot);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalog_args");
+
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->catName.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_catalog_pargs::~ThriftHiveMetastore_get_catalog_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalog_pargs");
+
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->catName)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_catalog_result::~ThriftHiveMetastore_get_catalog_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalog_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
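+
+// A *_result struct behaves like a tagged union: field id 0 carries the
+// method's return value and field ids 1..n carry its declared exceptions
+// (o1, o2, ...). The write() above emits whichever single member has its
+// __isset flag raised, so a reply holds either a result or exactly one
+// exception, never both.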
+
+
+ThriftHiveMetastore_get_catalog_presult::~ThriftHiveMetastore_get_catalog_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_catalogs_args::~ThriftHiveMetastore_get_catalogs_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_catalogs_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    xfer += iprot->skip(ftype);
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_catalogs_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalogs_args");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_catalogs_pargs::~ThriftHiveMetastore_get_catalogs_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_catalogs_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalogs_pargs");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_catalogs_result::~ThriftHiveMetastore_get_catalogs_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_catalogs_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_catalogs_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_catalogs_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_catalogs_presult::~ThriftHiveMetastore_get_catalogs_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_catalogs_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_drop_catalog_args::~ThriftHiveMetastore_drop_catalog_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_catalog_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->catName.read(iprot);
+          this->__isset.catName = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_drop_catalog_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_catalog_args");
+
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->catName.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_drop_catalog_pargs::~ThriftHiveMetastore_drop_catalog_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_catalog_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_catalog_pargs");
+
+  xfer += oprot->writeFieldBegin("catName", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->catName)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_drop_catalog_result::~ThriftHiveMetastore_drop_catalog_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_catalog_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_drop_catalog_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_catalog_result");
+
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o3) {
+    xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->o3.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_drop_catalog_presult::~ThriftHiveMetastore_drop_catalog_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_catalog_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_create_database_args::~ThriftHiveMetastore_create_database_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_database_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->database.read(iprot);
+          this->__isset.database = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_create_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_args");
+
+  xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->database.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_create_database_pargs::~ThriftHiveMetastore_create_database_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_pargs");
+
+  xfer += oprot->writeFieldBegin("database", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += (*(this->database)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_create_database_result::~ThriftHiveMetastore_create_database_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_database_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_create_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_database_result");
+
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o3) {
+    xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->o3.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_create_database_presult::~ThriftHiveMetastore_create_database_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_database_args::~ThriftHiveMetastore_get_database_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_database_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->name);
+          this->__isset.name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_args");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_database_pargs::~ThriftHiveMetastore_get_database_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_pargs");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->name)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_database_result::~ThriftHiveMetastore_get_database_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_database_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_database_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_database_presult::~ThriftHiveMetastore_get_database_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_drop_database_args::~ThriftHiveMetastore_drop_database_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_database_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->name);
+          this->__isset.name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->deleteData);
+          this->__isset.deleteData = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_BOOL) {
+          xfer += iprot->readBool(this->cascade);
+          this->__isset.cascade = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_drop_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_args");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2);
+  xfer += oprot->writeBool(this->deleteData);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3);
+  xfer += oprot->writeBool(this->cascade);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_drop_database_pargs::~ThriftHiveMetastore_drop_database_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_pargs");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->name)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("deleteData", ::apache::thrift::protocol::T_BOOL, 2);
+  xfer += oprot->writeBool((*(this->deleteData)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("cascade", ::apache::thrift::protocol::T_BOOL, 3);
+  xfer += oprot->writeBool((*(this->cascade)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
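+
+// How a call is actually driven: the generated client stub (outside this
+// hunk) frames each request as a Thrift message and serializes the pargs
+// over its output protocol. A rough sketch, assuming the Thrift 0.9.3
+// client conventions (oprot_ and cseqid come from the generated client
+// class, which this diff does not show):
+//
+//   oprot_->writeMessageBegin("drop_database",
+//                             ::apache::thrift::protocol::T_CALL, cseqid);
+//   ThriftHiveMetastore_drop_database_pargs args;
+//   args.name = &name;              // pargs fields are pointers: no copies
+//   args.deleteData = &deleteData;
+//   args.cascade = &cascade;
+//   args.write(oprot_);
+//   oprot_->writeMessageEnd();
+//   oprot_->getTransport()->flush();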
+
+
+ThriftHiveMetastore_drop_database_result::~ThriftHiveMetastore_drop_database_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_database_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_drop_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_drop_database_result");
+
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o3) {
+    xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3);
+    xfer += this->o3.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_drop_database_presult::~ThriftHiveMetastore_drop_database_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_drop_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 3:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o3.read(iprot);
+          this->__isset.o3 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_databases_args::~ThriftHiveMetastore_get_databases_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->pattern);
+          this->__isset.pattern = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_args");
+
+  xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->pattern);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_databases_pargs::~ThriftHiveMetastore_get_databases_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_pargs");
+
+  xfer += oprot->writeFieldBegin("pattern", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->pattern)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_databases_result::~ThriftHiveMetastore_get_databases_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->success.clear();
+            uint32_t _size1226;
+            ::apache::thrift::protocol::TType _etype1229;
+            xfer += iprot->readListBegin(_etype1229, _size1226);
+            this->success.resize(_size1226);
+            uint32_t _i1230;
+            for (_i1230 = 0; _i1230 < _size1226; ++_i1230)
+            {
+              xfer += iprot->readString(this->success[_i1230]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_databases_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
+      std::vector<std::string>::const_iterator _iter1231;
+      for (_iter1231 = this->success.begin(); _iter1231 != this->success.end(); ++_iter1231)
+      {
+        xfer += oprot->writeString((*_iter1231));
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
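+
+// Container fields add one more protocol layer: a list is framed by
+// writeListBegin(elementType, size) / writeListEnd() around its elements,
+// and on the read side readListBegin() yields the size up front so the
+// vector can be resize()d once before the element loop (see the _size /
+// _etype temporaries above). The numbered temporaries (_i1230, _iter1231,
+// ...) are simply the generator's way of keeping names unique per file.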
+
+
+ThriftHiveMetastore_get_databases_presult::~ThriftHiveMetastore_get_databases_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            (*(this->success)).clear();
+            uint32_t _size1232;
+            ::apache::thrift::protocol::TType _etype1235;
+            xfer += iprot->readListBegin(_etype1235, _size1232);
+            (*(this->success)).resize(_size1232);
+            uint32_t _i1236;
+            for (_i1236 = 0; _i1236 < _size1232; ++_i1236)
+            {
+              xfer += iprot->readString((*(this->success))[_i1236]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_all_databases_args::~ThriftHiveMetastore_get_all_databases_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_all_databases_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    xfer += iprot->skip(ftype);
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_databases_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_args");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_all_databases_pargs::~ThriftHiveMetastore_get_all_databases_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_all_databases_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_pargs");
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_all_databases_result::~ThriftHiveMetastore_get_all_databases_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            this->success.clear();
+            uint32_t _size1237;
+            ::apache::thrift::protocol::TType _etype1240;
+            xfer += iprot->readListBegin(_etype1240, _size1237);
+            this->success.resize(_size1237);
+            uint32_t _i1241;
+            for (_i1241 = 0; _i1241 < _size1237; ++_i1241)
+            {
+              xfer += iprot->readString(this->success[_i1241]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_all_databases_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0);
+    {
+      xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size()));
+      std::vector<std::string>::const_iterator _iter1242;
+      for (_iter1242 = this->success.begin(); _iter1242 != this->success.end(); ++_iter1242)
+      {
+        xfer += oprot->writeString((*_iter1242));
+      }
+      xfer += oprot->writeListEnd();
+    }
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_all_databases_presult::~ThriftHiveMetastore_get_all_databases_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_LIST) {
+          {
+            (*(this->success)).clear();
+            uint32_t _size1243;
+            ::apache::thrift::protocol::TType _etype1246;
+            xfer += iprot->readListBegin(_etype1246, _size1243);
+            (*(this->success)).resize(_size1243);
+            uint32_t _i1247;
+            for (_i1247 = 0; _i1247 < _size1243; ++_i1247)
+            {
+              xfer += iprot->readString((*(this->success))[_i1247]);
+            }
+            xfer += iprot->readListEnd();
+          }
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_database_args::~ThriftHiveMetastore_alter_database_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_database_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->dbname);
+          this->__isset.dbname = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->db.read(iprot);
+          this->__isset.db = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_alter_database_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_args");
+
+  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->dbname);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2);
+  xfer += this->db.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_database_pargs::~ThriftHiveMetastore_alter_database_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_database_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_pargs");
+
+  xfer += oprot->writeFieldBegin("dbname", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->dbname)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldBegin("db", ::apache::thrift::protocol::T_STRUCT, 2);
+  xfer += (*(this->db)).write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_database_result::~ThriftHiveMetastore_alter_database_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_database_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_alter_database_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_alter_database_result");
+
+  if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_alter_database_presult::~ThriftHiveMetastore_alter_database_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_alter_database_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_type_args::~ThriftHiveMetastore_get_type_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_type_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRING) {
+          xfer += iprot->readString(this->name);
+          this->__isset.name = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_args");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString(this->name);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_type_pargs::~ThriftHiveMetastore_get_type_pargs() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_type_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_pargs");
+
+  xfer += oprot->writeFieldBegin("name", ::apache::thrift::protocol::T_STRING, 1);
+  xfer += oprot->writeString((*(this->name)));
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_type_result::~ThriftHiveMetastore_get_type_result() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_type_result::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->success.read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_get_type_result::write(::apache::thrift::protocol::TProtocol* oprot) const {
+
+  uint32_t xfer = 0;
+
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_type_result");
+
+  if (this->__isset.success) {
+    xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0);
+    xfer += this->success.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o1) {
+    xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1);
+    xfer += this->o1.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  } else if (this->__isset.o2) {
+    xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2);
+    xfer += this->o2.write(oprot);
+    xfer += oprot->writeFieldEnd();
+  }
+  xfer += oprot->writeFieldStop();
+  xfer += oprot->writeStructEnd();
+  return xfer;
+}
+
+
+ThriftHiveMetastore_get_type_presult::~ThriftHiveMetastore_get_type_presult() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_get_type_presult::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 0:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += (*(this->success)).read(iprot);
+          this->__isset.success = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o1.read(iprot);
+          this->__isset.o1 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      case 2:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->o2.read(iprot);
+          this->__isset.o2 = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+
+ThriftHiveMetastore_create_type_args::~ThriftHiveMetastore_create_type_args() throw() {
+}
+
+
+uint32_t ThriftHiveMetastore_create_type_args::read(::apache::thrift::protocol::TProtocol* iprot) {
+
+  apache::thrift::protocol::TInputRecursionTracker tracker(*iprot);
+  uint32_t xfer = 0;
+  std::string fname;
+  ::apache::thrift::protocol::TType ftype;
+  int16_t fid;
+
+  xfer += iprot->readStructBegin(fname);
+
+  using ::apache::thrift::protocol::TProtocolException;
+
+
+  while (true)
+  {
+    xfer += iprot->readFieldBegin(fname, ftype, fid);
+    if (ftype == ::apache::thrift::protocol::T_STOP) {
+      break;
+    }
+    switch (fid)
+    {
+      case 1:
+        if (ftype == ::apache::thrift::protocol::T_STRUCT) {
+          xfer += this->type.read(iprot);
+          this->__isset.type = true;
+        } else {
+          xfer += iprot->skip(ftype);
+        }
+        break;
+      default:
+        xfer += iprot->skip(ftype);
+        break;
+    }
+    xfer += iprot->readFieldEnd();
+  }
+
+  xfer += iprot->readStructEnd();
+
+  return xfer;
+}
+
+uint32_t ThriftHiveMetastore_create_type_args::write(::apache::thrift::protocol::TProtocol* oprot) const {
+  uint32_t xfer = 0;
+  apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot);
+  xfer += oprot->writeStructBegin("ThriftHiveMetastore_create_type_args");
+
+  xfer += oprot->writeFieldBegin("type", ::apache::thrift::protocol::T_STRUCT, 1);
+  xfer += this->type.write(oprot);
+  xfer += oprot->writeFieldEnd();
+
+  xfer += oprot->writeFieldStop(

<TRUNCATED>
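
The generated C++ above follows Thrift's standard read-loop skeleton: read fields until T_STOP, dispatch on the numeric field id, and skip any field whose id or wire type is unknown, which is what gives Thrift structs their forward compatibility. Below is a minimal sketch of the same pattern in hand-written Java; the class and field names are illustrative assumptions (the real Java bindings are generated under gen-javabean), but the TProtocol calls are the standard Thrift 0.9.x API.

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TField;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TProtocolUtil;
import org.apache.thrift.protocol.TType;

public class GetTypeArgsReader {
  public String name; // corresponds to field id 1 of get_type_args above

  public void read(TProtocol iprot) throws TException {
    iprot.readStructBegin();
    while (true) {
      TField field = iprot.readFieldBegin();
      if (field.type == TType.STOP) {
        break;
      }
      switch (field.id) {
        case 1:
          if (field.type == TType.STRING) {
            name = iprot.readString();
          } else {
            TProtocolUtil.skip(iprot, field.type); // type mismatch: skip the value
          }
          break;
        default:
          TProtocolUtil.skip(iprot, field.type); // unknown field id: skip for compatibility
          break;
      }
      iprot.readFieldEnd();
    }
    iprot.readStructEnd();
  }
}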

[74/91] [abbrv] hive git commit: HIVE-19416 : merge master into branch (Sergey Shelukhin) 0712

Posted by se...@apache.org.
http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
index 0000000,a7ca05a..af9b0b1
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/StatsSetupConst.java
@@@ -1,0 -1,335 +1,339 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.common;
+ 
+ import java.io.IOException;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.TreeMap;
+ 
+ import com.google.common.collect.ImmutableList;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ 
+ import com.fasterxml.jackson.annotation.JsonInclude;
+ import com.fasterxml.jackson.annotation.JsonProperty;
+ import com.fasterxml.jackson.core.JsonGenerator;
+ import com.fasterxml.jackson.core.JsonParser;
+ import com.fasterxml.jackson.core.JsonProcessingException;
+ import com.fasterxml.jackson.databind.DeserializationContext;
+ import com.fasterxml.jackson.databind.JsonDeserializer;
+ import com.fasterxml.jackson.databind.JsonSerializer;
+ import com.fasterxml.jackson.databind.ObjectMapper;
+ import com.fasterxml.jackson.databind.ObjectReader;
+ import com.fasterxml.jackson.databind.ObjectWriter;
+ import com.fasterxml.jackson.databind.SerializerProvider;
+ import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
+ import com.fasterxml.jackson.databind.annotation.JsonSerialize;
+ 
+ 
+ /**
+  * A class that defines the constant strings used by the statistics implementation.
+  */
+ 
+ public class StatsSetupConst {
+ 
+   protected static final Logger LOG = LoggerFactory.getLogger(StatsSetupConst.class.getName());
+ 
+   public enum StatDB {
+     fs {
+       @Override
+       public String getPublisher(Configuration conf) {
+         return "org.apache.hadoop.hive.ql.stats.fs.FSStatsPublisher";
+       }
+ 
+       @Override
+       public String getAggregator(Configuration conf) {
+         return "org.apache.hadoop.hive.ql.stats.fs.FSStatsAggregator";
+       }
+     },
+     custom {
+       @Override
+       public String getPublisher(Configuration conf) {
+         return MetastoreConf.getVar(conf, ConfVars.STATS_DEFAULT_PUBLISHER);
+       }
+       @Override
+       public String getAggregator(Configuration conf) {
+         return MetastoreConf.getVar(conf, ConfVars.STATS_DEFAULT_AGGREGATOR);
+       }
+     };
+     public abstract String getPublisher(Configuration conf);
+     public abstract String getAggregator(Configuration conf);
+   }
+ 
+   // statistics stored in metastore
+   /**
+    * The name of the statistic Num Files to be published or gathered.
+    */
+   public static final String NUM_FILES = "numFiles";
+ 
+   /**
+    * The name of the statistic Num Partitions to be published or gathered.
+    */
+   public static final String NUM_PARTITIONS = "numPartitions";
+ 
+   /**
+    * The name of the statistic Total Size to be published or gathered.
+    */
+   public static final String TOTAL_SIZE = "totalSize";
+ 
+   /**
+    * The name of the statistic Row Count to be published or gathered.
+    */
+   public static final String ROW_COUNT = "numRows";
+ 
+   public static final String RUN_TIME_ROW_COUNT = "runTimeNumRows";
+ 
+   /**
+    * The name of the statistic Raw Data Size to be published or gathered.
+    */
+   public static final String RAW_DATA_SIZE = "rawDataSize";
+ 
+   /**
+    * The name of the statistic for Number of Erasure Coded Files - to be published or gathered.
+    */
+   public static final String NUM_ERASURE_CODED_FILES = "numFilesErasureCoded";
+ 
+   /**
+    * Temp dir for writing stats from tasks.
+    */
+   public static final String STATS_TMP_LOC = "hive.stats.tmp.loc";
+ 
+   public static final String STATS_FILE_PREFIX = "tmpstats-";
+   /**
+    * List of all supported statistics
+    */
+   public static final List<String> SUPPORTED_STATS = ImmutableList.of(
+       NUM_FILES, ROW_COUNT, TOTAL_SIZE, RAW_DATA_SIZE, NUM_ERASURE_CODED_FILES);
+ 
+   /**
+    * List of all statistics that need to be collected during query execution. These are
+    * statistics that inherently require a scan of the data.
+    */
+   public static final List<String> STATS_REQUIRE_COMPUTE = ImmutableList.of(ROW_COUNT, RAW_DATA_SIZE);
+ 
+   /**
+    * List of statistics that can be collected quickly without requiring a scan of the data.
+    */
+   public static final List<String> FAST_STATS = ImmutableList.of(
+       NUM_FILES, TOTAL_SIZE, NUM_ERASURE_CODED_FILES);
+ 
+   // This string constant is used to indicate to AlterHandler that
+   // alterPartition/alterTable is happening via statsTask or via user.
+   public static final String STATS_GENERATED = "STATS_GENERATED";
+ 
+   public static final String TASK = "TASK";
+ 
+   public static final String USER = "USER";
+ 
+   // This string constant is used by AlterHandler to figure out that it should not attempt to
+   // update stats. It is set by any client-side task which wishes to signal that no stats
+   // update should take place, such as with replication.
+   public static final String DO_NOT_UPDATE_STATS = "DO_NOT_UPDATE_STATS";
+ 
+   // This string constant is persisted in the metastore to indicate whether the corresponding
+   // table's or partition's basic statistics and column statistics are accurate.
+   public static final String COLUMN_STATS_ACCURATE = "COLUMN_STATS_ACCURATE";
+ 
+   public static final String COLUMN_STATS = "COLUMN_STATS";
+ 
+   public static final String BASIC_STATS = "BASIC_STATS";
+ 
+   public static final String CASCADE = "CASCADE";
+ 
++  // TODO: when alter calls are switched to req/resp models, replace these and the above with fields.
++  public static final String TXN_ID = "WRITER_TXN_ID";
++  public static final String VALID_WRITE_IDS = "WRITER_WRITE_ID";
++
+   public static final String TRUE = "true";
+ 
+   public static final String FALSE = "false";
+ 
+   // The parameter keys for the table statistics. Those keys are excluded from 'show create table' command output.
+   public static final List<String> TABLE_PARAMS_STATS_KEYS = ImmutableList.of(
+       COLUMN_STATS_ACCURATE, NUM_FILES, TOTAL_SIZE, ROW_COUNT, RAW_DATA_SIZE, NUM_PARTITIONS,
+       NUM_ERASURE_CODED_FILES);
+ 
+   private static class ColumnStatsAccurate {
+     private static ObjectReader objectReader;
+     private static ObjectWriter objectWriter;
+ 
+     static {
+       ObjectMapper objectMapper = new ObjectMapper();
+       objectReader = objectMapper.readerFor(ColumnStatsAccurate.class);
+       objectWriter = objectMapper.writerFor(ColumnStatsAccurate.class);
+     }
+ 
+     static class BooleanSerializer extends JsonSerializer<Boolean> {
+ 
+       @Override
+       public void serialize(Boolean value, JsonGenerator jsonGenerator,
+           SerializerProvider serializerProvider) throws IOException {
+         jsonGenerator.writeString(value.toString());
+       }
+     }
+ 
+     static class BooleanDeserializer extends JsonDeserializer<Boolean> {
+ 
+       @Override
+       public Boolean deserialize(JsonParser jsonParser,
+           DeserializationContext deserializationContext)
+               throws IOException {
+         return Boolean.valueOf(jsonParser.getValueAsString());
+       }
+     }
+ 
+     @JsonInclude(JsonInclude.Include.NON_DEFAULT)
+     @JsonSerialize(using = BooleanSerializer.class)
+     @JsonDeserialize(using = BooleanDeserializer.class)
+     @JsonProperty(BASIC_STATS)
+     boolean basicStats;
+ 
+     @JsonInclude(JsonInclude.Include.NON_EMPTY)
+     @JsonProperty(COLUMN_STATS)
+     @JsonSerialize(contentUsing = BooleanSerializer.class)
+     @JsonDeserialize(contentUsing = BooleanDeserializer.class)
+     TreeMap<String, Boolean> columnStats = new TreeMap<>();
+ 
+   }
+ 
+   public static boolean areBasicStatsUptoDate(Map<String, String> params) {
+     if (params == null) {
+       return false;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     return stats.basicStats;
+   }
+ 
+   public static boolean areColumnStatsUptoDate(Map<String, String> params, String colName) {
+     if (params == null) {
+       return false;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     return stats.columnStats.containsKey(colName);
+   }
+ 
+   // This can only throw a JsonProcessingException if stats.put(BASIC_STATS, TRUE)
+   // produced a duplicate key, which is not possible.
+   // Note that setting basic stats to false will wipe out the column stats too.
+   public static void setBasicStatsState(Map<String, String> params, String setting) {
+     if (setting.equals(FALSE)) {
+       if (params!=null && params.containsKey(COLUMN_STATS_ACCURATE)) {
+         params.remove(COLUMN_STATS_ACCURATE);
+       }
+       return;
+     }
+     if (params == null) {
+       throw new RuntimeException("params are null...cant set columnstatstate!");
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     stats.basicStats = true;
+     try {
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       throw new RuntimeException("can't serialize column stats", e);
+     }
+   }
+ 
+   public static void setColumnStatsState(Map<String, String> params, List<String> colNames) {
+     if (params == null) {
+       throw new RuntimeException("params are null...cant set columnstatstate!");
+     }
+     if (colNames == null) {
+       return;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+ 
+     for (String colName : colNames) {
+       if (!stats.columnStats.containsKey(colName)) {
+         stats.columnStats.put(colName, true);
+       }
+     }
+     try {
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       LOG.trace(e.getMessage());
+     }
+   }
+ 
+   public static boolean canColumnStatsMerge(Map<String, String> params, String colName) {
+     if (params == null) {
+       return false;
+     }
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     return stats.columnStats.containsKey(colName);
+   }
+   
+   public static void clearColumnStatsState(Map<String, String> params) {
+     if (params == null) {
+       return;
+     }
+ 
+     ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+     stats.columnStats.clear();
+ 
+     try {
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       LOG.trace(e.getMessage());
+     }
+   }
+ 
+   public static void removeColumnStatsState(Map<String, String> params, List<String> colNames) {
+     if (params == null) {
+       return;
+     }
+     try {
+       ColumnStatsAccurate stats = parseStatsAcc(params.get(COLUMN_STATS_ACCURATE));
+       for (String string : colNames) {
+         stats.columnStats.remove(string);
+       }
+       params.put(COLUMN_STATS_ACCURATE, ColumnStatsAccurate.objectWriter.writeValueAsString(stats));
+     } catch (JsonProcessingException e) {
+       LOG.trace(e.getMessage());
+     }
+   }
+ 
+   public static void setStatsStateForCreateTable(Map<String, String> params,
+       List<String> cols, String setting) {
+     if (TRUE.equals(setting)) {
+       for (String stat : StatsSetupConst.SUPPORTED_STATS) {
+         params.put(stat, "0");
+       }
+     }
+     setBasicStatsState(params, setting);
+     if (TRUE.equals(setting)) {
+       setColumnStatsState(params, cols);
+     }
+   }
+   
+   private static ColumnStatsAccurate parseStatsAcc(String statsAcc) {
+     if (statsAcc == null) {
+       return new ColumnStatsAccurate();
+     }
+     try {
+       return ColumnStatsAccurate.objectReader.readValue(statsAcc);
+     } catch (Exception e) {
+       ColumnStatsAccurate ret = new ColumnStatsAccurate();
+       if (TRUE.equalsIgnoreCase(statsAcc)) {
+         ret.basicStats = true;
+       }
+       return ret;
+     }
+   }
+ }
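
For reference, the ColumnStatsAccurate helper above round-trips the COLUMN_STATS_ACCURATE table parameter as a small JSON document. A minimal sketch of how client code might exercise that API follows; the demo class, column names, and printed values are illustrative assumptions, and only the StatsSetupConst calls are from the file above.

import java.util.HashMap;
import java.util.Map;
import com.google.common.collect.ImmutableList;
import org.apache.hadoop.hive.common.StatsSetupConst;

public class StatsSetupConstDemo {
  public static void main(String[] args) {
    Map<String, String> params = new HashMap<>();
    // Mark basic stats accurate; this serializes {"BASIC_STATS":"true"} into the parameter.
    StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE);
    // Mark two (hypothetical) columns accurate; the JSON becomes something like
    // {"BASIC_STATS":"true","COLUMN_STATS":{"id":"true","name":"true"}}.
    StatsSetupConst.setColumnStatsState(params, ImmutableList.of("id", "name"));
    System.out.println(params.get(StatsSetupConst.COLUMN_STATS_ACCURATE));
    System.out.println(StatsSetupConst.areBasicStatsUptoDate(params));        // true
    System.out.println(StatsSetupConst.areColumnStatsUptoDate(params, "id")); // true
    // Setting basic stats to false removes the parameter, wiping column stats too.
    StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE);
    System.out.println(StatsSetupConst.areColumnStatsUptoDate(params, "id")); // false
  }
}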

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index 0000000,050dca9..e7cf07f
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@@ -1,0 -1,202 +1,203 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import java.util.List;
+ 
+ import org.apache.hadoop.conf.Configurable;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ 
+ /**
+  * Interface for Alter Table and Alter Partition code
+  */
+ public interface AlterHandler extends Configurable {
+ 
+   /**
+    * @deprecated As of release 2.2.0. Replaced by {@link #alterTable(RawStore, Warehouse, String,
+    * String, String, Table, EnvironmentContext, IHMSHandler)}
+    *
+    * Handles alter table; the changes may be cascaded to partitions if applicable.
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    *          Hive Warehouse where table data is stored
+    * @param catName
+    *          catalog of the table being altered
+    * @param dbname
+    *          database of the table being altered
+    * @param name
+    *          original name of the table being altered; same as
+    *          <i>newTable.tableName</i> if the alter op is not a rename.
+    * @param newTable
+    *          new table object
+    * @throws InvalidOperationException
+    *           thrown if the newTable object is invalid
+    * @throws MetaException
+    *           thrown if there is any other error
+    */
+   @Deprecated
+   default void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
+     String name, Table newTable, EnvironmentContext envContext)
+       throws InvalidOperationException, MetaException {
+     alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null);
+   }
+ 
+   /**
+    * Handles alter table; the changes may be cascaded to partitions if applicable.
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    *          Hive Warehouse where table data is stored
+    * @param catName catalog of the table being altered
+    * @param dbname
+    *          database of the table being altered
+    * @param name
+    *          original name of the table being altered; same as
+    *          <i>newTable.tableName</i> if the alter op is not a rename.
+    * @param newTable
+    *          new table object
+    * @param handler
+    *          HMSHandler object (required to log event notifications)
+    * @throws InvalidOperationException
+    *           thrown if the newTable object is invalid
+    * @throws MetaException
+    *           thrown if there is any other error
+    */
+   void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
+       String name, Table newTable, EnvironmentContext envContext,
+       IHMSHandler handler) throws InvalidOperationException, MetaException;
+ 
+   /**
+    * @deprecated As of release 2.2.0.  Replaced by {@link #alterPartition(RawStore, Warehouse, String,
+    * String, List, Partition, EnvironmentContext, IHMSHandler)}
+    *
+    * Handles alter partition.
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param part_vals
+    *          original values of the partition being altered
+    * @param new_part
+    *          new partition object
+    * @return the altered partition
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   @Deprecated
+   Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+     final String name, final List<String> part_vals, final Partition new_part,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ 
+   /**
+    * Handles alter partition.
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh physical warehouse class
+    * @param catName catalog name
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param part_vals
+    *          original values of the partition being altered
+    * @param new_part
+    *          new partition object
+    * @param handler
+    *          HMSHandler object (required to log event notifications)
+    * @return the altered partition
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName,
+                            final String dbname, final String name, final List<String> part_vals,
+                            final Partition new_part, EnvironmentContext environmentContext,
+                            IHMSHandler handler)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ 
+   /**
+    * @deprecated As of release 3.0.0. Replaced by {@link #alterPartitions(RawStore, Warehouse, String,
+    * String, String, List, EnvironmentContext, long, String, long, IHMSHandler)}
+    *
+    * Handles alter partitions.
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param new_parts
+    *          new partition list
+    * @return the altered partition list
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   @Deprecated
+   List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
+     final String dbname, final String name, final List<Partition> new_parts,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ 
+   /**
+    * Handles alter partitions.
+    *
+    * @param msdb
+    *          object to get metadata
+    * @param wh
+    * @param dbname
+    *          database of the partition being altered
+    * @param name
+    *          table of the partition being altered
+    * @param new_parts
+    *          new partition list
+    * @param handler
+    *          HMSHandler object (required to log event notifications)
+    * @return the altered partition list
+    * @throws InvalidOperationException
+    * @throws InvalidObjectException
+    * @throws AlreadyExistsException
+    * @throws MetaException
+    */
+   List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
+     final String dbname, final String name, final List<Partition> new_parts,
 -    EnvironmentContext environmentContext,IHMSHandler handler)
++    EnvironmentContext environmentContext, long txnId, String writeIdList, long writeId,
++    IHMSHandler handler)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException;
+ }
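
The signature change above threads writer transaction information (txnId, writeIdList, writeId) into alterPartitions. As a hedged sketch of the calling convention, a client could carry the same information in an EnvironmentContext using the StatsSetupConst.TXN_ID and VALID_WRITE_IDS keys that HiveAlterHandler (next diff) reads back out; the helper class name and the literal values here are illustrative assumptions, not part of the commit.

import org.apache.hadoop.hive.common.StatsSetupConst;
import org.apache.hadoop.hive.metastore.api.EnvironmentContext;

public class AlterContextDemo {
  // Builds an EnvironmentContext carrying the writer txn id and its valid write-id list,
  // in the string form that HiveAlterHandler.alterPartition parses with Long.parseLong.
  public static EnvironmentContext withWriteIds(long txnId, String validWriteIdList) {
    EnvironmentContext ctx = new EnvironmentContext();
    ctx.putToProperties(StatsSetupConst.TXN_ID, Long.toString(txnId));
    ctx.putToProperties(StatsSetupConst.VALID_WRITE_IDS, validWriteIdList);
    return ctx;
  }

  public static void main(String[] args) {
    // Hypothetical values; in Hive these would come from the transaction manager.
    EnvironmentContext ctx = withWriteIds(42L, "db.tbl:9223372036854775807::");
    System.out.println(ctx.getProperties());
  }
}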

http://git-wip-us.apache.org/repos/asf/hive/blob/93b9cdd6/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
----------------------------------------------------------------------
diff --cc standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 0000000,93ac74c..8b2a6ba
mode 000000,100644..100644
--- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@@ -1,0 -1,948 +1,974 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.hadoop.hive.metastore;
+ 
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.collect.Lists;
+ 
+ import org.apache.commons.lang.StringUtils;
+ import org.apache.hadoop.hive.common.TableName;
+ import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+ import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent;
+ import org.apache.hadoop.hive.metastore.events.AlterTableEvent;
+ import org.apache.hadoop.hive.metastore.messaging.EventMessage;
+ import org.apache.hadoop.hive.metastore.utils.FileUtils;
+ import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hive.common.StatsSetupConst;
+ import org.apache.hadoop.hive.metastore.api.AlreadyExistsException;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc;
+ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;
+ import org.apache.hadoop.hive.metastore.api.Database;
+ import org.apache.hadoop.hive.metastore.api.EnvironmentContext;
+ import org.apache.hadoop.hive.metastore.api.FieldSchema;
+ import org.apache.hadoop.hive.metastore.api.InvalidInputException;
+ import org.apache.hadoop.hive.metastore.api.InvalidObjectException;
+ import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+ import org.apache.hadoop.hive.metastore.api.MetaException;
+ import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+ import org.apache.hadoop.hive.metastore.api.Partition;
+ import org.apache.hadoop.hive.metastore.api.Table;
+ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+ 
+ import java.io.IOException;
+ import java.net.URI;
+ import java.util.ArrayList;
+ import java.util.Collections;
+ import java.util.HashMap;
+ import java.util.Iterator;
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Map.Entry;
+ 
+ import static org.apache.hadoop.hive.metastore.Warehouse.DEFAULT_CATALOG_NAME;
+ import static org.apache.hadoop.hive.metastore.utils.MetaStoreUtils.getDefaultCatalog;
+ import static org.apache.hadoop.hive.metastore.utils.StringUtils.normalizeIdentifier;
+ 
+ /**
+  * Hive-specific implementation of alter table and alter partition handling.
+  */
+ public class HiveAlterHandler implements AlterHandler {
+ 
+   protected Configuration conf;
+   private static final Logger LOG = LoggerFactory.getLogger(HiveAlterHandler.class
+       .getName());
+ 
+   // hiveConf, getConf and setConf are in this class because AlterHandler extends Configurable.
+   // Always use the configuration from HMS Handler.  Making AlterHandler not extend Configurable
+   // is not in the scope of the fix for HIVE-17942.
+   @Override
+   public Configuration getConf() {
+     return conf;
+   }
+ 
+   @Override
+   @SuppressWarnings("nls")
+   public void setConf(Configuration conf) {
+     this.conf = conf;
+   }
+ 
+   @Override
+   public void alterTable(RawStore msdb, Warehouse wh, String catName, String dbname,
+       String name, Table newt, EnvironmentContext environmentContext,
+       IHMSHandler handler) throws InvalidOperationException, MetaException {
+     catName = normalizeIdentifier(catName);
+     name = name.toLowerCase();
+     dbname = dbname.toLowerCase();
+ 
+     final boolean cascade = environmentContext != null
+         && environmentContext.isSetProperties()
+         && StatsSetupConst.TRUE.equals(environmentContext.getProperties().get(
+             StatsSetupConst.CASCADE));
+     if (newt == null) {
+       throw new InvalidOperationException("New table is null");
+     }
+ 
+     String newTblName = newt.getTableName().toLowerCase();
+     String newDbName = newt.getDbName().toLowerCase();
+ 
+     if (!MetaStoreUtils.validateName(newTblName, handler.getConf())) {
+       throw new InvalidOperationException(newTblName + " is not a valid object name");
+     }
+     String validate = MetaStoreUtils.validateTblColumns(newt.getSd().getCols());
+     if (validate != null) {
+       throw new InvalidOperationException("Invalid column " + validate);
+     }
+ 
+     Path srcPath = null;
+     FileSystem srcFs;
+     Path destPath = null;
+     FileSystem destFs = null;
+ 
+     boolean success = false;
+     boolean dataWasMoved = false;
+     boolean isPartitionedTable = false;
+ 
+     Table oldt = null;
+ 
+     List<TransactionalMetaStoreEventListener> transactionalListeners = handler.getTransactionalListeners();
+     List<MetaStoreEventListener> listeners = handler.getListeners();
+     Map<String, String> txnAlterTableEventResponses = Collections.emptyMap();
+ 
+     try {
+       boolean rename = false;
+       List<Partition> parts;
+ 
+       // Switching tables between catalogs is not allowed.
+       if (!catName.equalsIgnoreCase(newt.getCatName())) {
+         throw new InvalidOperationException("Tables cannot be moved between catalogs, old catalog" +
+             catName + ", new catalog " + newt.getCatName());
+       }
+ 
+       // check if table with the new name already exists
+       if (!newTblName.equals(name) || !newDbName.equals(dbname)) {
 -        if (msdb.getTable(catName, newDbName, newTblName) != null) {
++        if (msdb.getTable(catName, newDbName, newTblName, -1, null) != null) {
+           throw new InvalidOperationException("new table " + newDbName
+               + "." + newTblName + " already exists");
+         }
+         rename = true;
+       }
+ 
+       msdb.openTransaction();
+       // get old table
 -      oldt = msdb.getTable(catName, dbname, name);
++      // Note: we don't verify stats here; it's done below in alterTableUpdateTableColumnStats.
++      oldt = msdb.getTable(catName, dbname, name, -1, null);
+       if (oldt == null) {
+         throw new InvalidOperationException("table " +
+             TableName.getQualified(catName, dbname, name) + " doesn't exist");
+       }
+ 
+       if (oldt.getPartitionKeysSize() != 0) {
+         isPartitionedTable = true;
+       }
+ 
+       // Views derive the column type from the base table definition.  So the view definition
+       // can be altered to change the column types.  The column type compatibility checks should
+       // be done only for non-views.
+       if (MetastoreConf.getBoolVar(handler.getConf(),
+             MetastoreConf.ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES) &&
+           !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())) {
+         // Throws InvalidOperationException if the new column types are not
+         // compatible with the current column types.
+         checkColTypeChangeCompatible(oldt.getSd().getCols(), newt.getSd().getCols());
+       }
+ 
+       // Check that partition keys have not changed, except for virtual views;
+       // however, allow the partition comments to change.
+       boolean partKeysPartiallyEqual = checkPartialPartKeysEqual(oldt.getPartitionKeys(),
+           newt.getPartitionKeys());
+ 
+       if (!oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())) {
+         if (!partKeysPartiallyEqual) {
+           throw new InvalidOperationException("partition keys can not be changed.");
+         }
+       }
+ 
+       // rename needs change the data location and move the data to the new location corresponding
+       // to the new name if:
+       // 1) the table is not a virtual view, and
+       // 2) the table is not an external table, and
+       // 3) the user didn't change the default location (or new location is empty), and
+       // 4) the table was not initially created with a specified location
+       if (rename
+           && !oldt.getTableType().equals(TableType.VIRTUAL_VIEW.toString())
+           && (oldt.getSd().getLocation().compareTo(newt.getSd().getLocation()) == 0
+             || StringUtils.isEmpty(newt.getSd().getLocation()))
+           && !MetaStoreUtils.isExternalTable(oldt)) {
+         Database olddb = msdb.getDatabase(catName, dbname);
+         // if a table was created in a user specified location using the DDL like
+         // create table tbl ... location ...., it should be treated like an external table
+         // in the table rename, its data location should not be changed. We can check
+         // if the table directory was created directly under its database directory to tell
+         // if it is such a table
+         srcPath = new Path(oldt.getSd().getLocation());
+         String oldtRelativePath = (new Path(olddb.getLocationUri()).toUri())
+             .relativize(srcPath.toUri()).toString();
+         boolean tableInSpecifiedLoc = !oldtRelativePath.equalsIgnoreCase(name)
+             && !oldtRelativePath.equalsIgnoreCase(name + Path.SEPARATOR);
+ 
+         if (!tableInSpecifiedLoc) {
+           srcFs = wh.getFs(srcPath);
+ 
+           // get new location
+           Database db = msdb.getDatabase(catName, newDbName);
+           Path databasePath = constructRenamedPath(wh.getDatabasePath(db), srcPath);
+           destPath = new Path(databasePath, newTblName);
+           destFs = wh.getFs(destPath);
+ 
+           newt.getSd().setLocation(destPath.toString());
+ 
+           // check that destination does not exist otherwise we will be
+           // overwriting data
+           // check that src and dest are on the same file system
+           if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
+             throw new InvalidOperationException("table new location " + destPath
+                 + " is on a different file system than the old location "
+                 + srcPath + ". This operation is not supported");
+           }
+ 
+           try {
+             if (destFs.exists(destPath)) {
+               throw new InvalidOperationException("New location for this table " +
+                   TableName.getQualified(catName, newDbName, newTblName) +
+                       " already exists : " + destPath);
+             }
+             // check that src exists and also checks permissions necessary, rename src to dest
+             if (srcFs.exists(srcPath) && wh.renameDir(srcPath, destPath,
+                     ReplChangeManager.isSourceOfReplication(olddb))) {
+               dataWasMoved = true;
+             }
+           } catch (IOException | MetaException e) {
+             LOG.error("Alter Table operation for " + dbname + "." + name + " failed.", e);
+             throw new InvalidOperationException("Alter Table operation for " + dbname + "." + name +
+                 " failed to move data due to: '" + getSimpleMessage(e)
+                 + "' See hive log file for details.");
+           }
+ 
+           if (!HiveMetaStore.isRenameAllowed(olddb, db)) {
+             LOG.error("Alter Table operation for " + TableName.getQualified(catName, dbname, name) +
+                     "to new table = " + TableName.getQualified(catName, newDbName, newTblName) + " failed ");
+             throw new MetaException("Alter table not allowed for table " +
+                     TableName.getQualified(catName, dbname, name) +
+                     "to new table = " + TableName.getQualified(catName, newDbName, newTblName));
+           }
+         }
+ 
+         if (isPartitionedTable) {
+           String oldTblLocPath = srcPath.toUri().getPath();
+           String newTblLocPath = dataWasMoved ? destPath.toUri().getPath() : null;
+ 
+           // also the location field in partition
+           parts = msdb.getPartitions(catName, dbname, name, -1);
+           Map<Partition, ColumnStatistics> columnStatsNeedUpdated = new HashMap<>();
+           for (Partition part : parts) {
+             String oldPartLoc = part.getSd().getLocation();
+             if (dataWasMoved && oldPartLoc.contains(oldTblLocPath)) {
+               URI oldUri = new Path(oldPartLoc).toUri();
+               String newPath = oldUri.getPath().replace(oldTblLocPath, newTblLocPath);
+               Path newPartLocPath = new Path(oldUri.getScheme(), oldUri.getAuthority(), newPath);
+               part.getSd().setLocation(newPartLocPath.toString());
+             }
+             part.setDbName(newDbName);
+             part.setTableName(newTblName);
+             ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
+                 part.getValues(), part.getSd().getCols(), oldt, part, null);
+             if (colStats != null) {
+               columnStatsNeedUpdated.put(part, colStats);
+             }
+           }
 -          msdb.alterTable(catName, dbname, name, newt);
++          // Do not verify stats parameters on a partitioned table.
++          msdb.alterTable(catName, dbname, name, newt, -1, null);
+           // alterPartition is only for changing the partition location in the table rename
+           if (dataWasMoved) {
+ 
+             int partsToProcess = parts.size();
+             int partitionBatchSize = MetastoreConf.getIntVar(handler.getConf(),
+                 MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX);
+             int batchStart = 0;
+             while (partsToProcess > 0) {
+               int batchEnd = Math.min(batchStart + partitionBatchSize, parts.size());
+               List<Partition> partBatch = parts.subList(batchStart, batchEnd);
+               int partBatchSize = partBatch.size();
+               partsToProcess -= partBatchSize;
+               batchStart += partBatchSize;
+               List<List<String>> partValues = new ArrayList<>(partBatchSize);
+               for (Partition part : partBatch) {
+                 partValues.add(part.getValues());
+               }
 -              msdb.alterPartitions(catName, newDbName, newTblName, partValues, partBatch);
++              msdb.alterPartitions(catName, newDbName, newTblName, partValues,
++                  partBatch, -1, -1, null);
+             }
+           }
+ 
+           for (Entry<Partition, ColumnStatistics> partColStats : columnStatsNeedUpdated.entrySet()) {
+             ColumnStatistics newPartColStats = partColStats.getValue();
+             newPartColStats.getStatsDesc().setDbName(newDbName);
+             newPartColStats.getStatsDesc().setTableName(newTblName);
+             msdb.updatePartitionColumnStatistics(newPartColStats, partColStats.getKey().getValues());
+           }
+         } else {
 -          alterTableUpdateTableColumnStats(msdb, oldt, newt);
++          alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext);
+         }
+       } else {
+         // operations other than table rename
+ 
+         if (MetaStoreUtils.requireCalStats(null, null, newt, environmentContext) &&
+             !isPartitionedTable) {
+           Database db = msdb.getDatabase(catName, newDbName);
+           // Update table stats. For partitioned table, we update stats in alterPartition()
+           MetaStoreUtils.updateTableStatsSlow(db, newt, wh, false, true, environmentContext);
+         }
+ 
+         if (isPartitionedTable) {
+           // Currently only column-related changes can be cascaded in alter table.
+           if (!MetaStoreUtils.areSameColumns(oldt.getSd().getCols(), newt.getSd().getCols())) {
+             parts = msdb.getPartitions(catName, dbname, name, -1);
+             for (Partition part : parts) {
+               Partition oldPart = new Partition(part);
+               List<FieldSchema> oldCols = part.getSd().getCols();
+               part.getSd().setCols(newt.getSd().getCols());
+               ColumnStatistics colStats = updateOrGetPartitionColumnStats(msdb, catName, dbname, name,
+                   part.getValues(), oldCols, oldt, part, null);
+               assert(colStats == null);
++              // Note: we don't do txn stats validation here; this can only delete stats?
+               if (cascade) {
 -                msdb.alterPartition(catName, dbname, name, part.getValues(), part);
++                msdb.alterPartition(catName, dbname, name, part.getValues(), part, -1, null);
+               } else {
+                 // update changed properties (stats)
+                 oldPart.setParameters(part.getParameters());
 -                msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart);
++                msdb.alterPartition(catName, dbname, name, part.getValues(), oldPart, -1, null);
+               }
+             }
 -            msdb.alterTable(catName, dbname, name, newt);
++            // Don't validate table-level stats for a partitioned table.
++            msdb.alterTable(catName, dbname, name, newt, -1, null);
+           } else {
+             LOG.warn("Alter table not cascaded to partitions.");
 -            alterTableUpdateTableColumnStats(msdb, oldt, newt);
++            alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext);
+           }
+         } else {
 -          alterTableUpdateTableColumnStats(msdb, oldt, newt);
++          alterTableUpdateTableColumnStats(msdb, oldt, newt, environmentContext);
+         }
+       }
+ 
+       if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+         txnAlterTableEventResponses = MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                   EventMessage.EventType.ALTER_TABLE,
+                   new AlterTableEvent(oldt, newt, false, true, handler),
+                   environmentContext);
+       }
+       // commit the changes
+       success = msdb.commitTransaction();
+     } catch (InvalidObjectException e) {
+       LOG.debug("Failed to get object from Metastore ", e);
+       throw new InvalidOperationException(
+           "Unable to change partition or table."
+               + " Check metastore logs for detailed stack." + e.getMessage());
+     } catch (InvalidInputException e) {
+         LOG.debug("Accessing Metastore failed due to invalid input ", e);
+         throw new InvalidOperationException(
+             "Unable to change partition or table."
+                 + " Check metastore logs for detailed stack." + e.getMessage());
+     } catch (NoSuchObjectException e) {
+       LOG.debug("Object not found in metastore ", e);
+       throw new InvalidOperationException(
+           "Unable to change partition or table. Database " + dbname + " does not exist"
+               + " Check metastore logs for detailed stack." + e.getMessage());
+     } finally {
+       if (!success) {
+         LOG.error("Failed to alter table " + TableName.getQualified(catName, dbname, name));
+         msdb.rollbackTransaction();
+         if (dataWasMoved) {
+           try {
+             if (destFs.exists(destPath)) {
+               if (!destFs.rename(destPath, srcPath)) {
+                 LOG.error("Failed to restore data from " + destPath + " to " + srcPath
+                     + " in alter table failure. Manual restore is needed.");
+               }
+             }
+           } catch (IOException e) {
+             LOG.error("Failed to restore data from " + destPath + " to " + srcPath
+                 +  " in alter table failure. Manual restore is needed.");
+           }
+         }
+       }
+     }
+ 
+     if (!listeners.isEmpty()) {
+       // I don't think event notifications in case of failures are necessary, but other HMS operations
+       // make this call whether the event failed or succeeded. To make this behavior consistent,
+       // this call is made for failed events also.
+       MetaStoreListenerNotifier.notifyEvent(listeners, EventMessage.EventType.ALTER_TABLE,
+           new AlterTableEvent(oldt, newt, false, success, handler),
+           environmentContext, txnAlterTableEventResponses, msdb);
+     }
+   }
+ 
+   /**
+    * Extracts a concise message from a MetaException that encapsulates an error message from a
+    * Hadoop RPC RemoteException, which wraps the stack trace into e.getMessage() and makes
+    * logs/stack traces confusing.
+    * @param ex the exception to summarize
+    * @return for a MetaException, the first line of its message; otherwise the full message
+    */
+   String getSimpleMessage(Exception ex) {
+     if (ex instanceof MetaException) {
+       String msg = ex.getMessage();
+       if (msg == null || !msg.contains("\n")) {
+         return msg;
+       }
+       return msg.substring(0, msg.indexOf('\n'));
+     }
+     return ex.getMessage();
+   }
+ 
+   @Override
+   public Partition alterPartition(final RawStore msdb, Warehouse wh, final String dbname,
+     final String name, final List<String> part_vals, final Partition new_part,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     return alterPartition(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, part_vals, new_part,
+         environmentContext, null);
+   }
+ 
+   @Override
+   public Partition alterPartition(final RawStore msdb, Warehouse wh, final String catName,
+                                   final String dbname, final String name,
+                                   final List<String> part_vals, final Partition new_part,
+                                   EnvironmentContext environmentContext, IHMSHandler handler)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     boolean success = false;
+     Partition oldPart;
+     List<TransactionalMetaStoreEventListener> transactionalListeners = null;
+     if (handler != null) {
+       transactionalListeners = handler.getTransactionalListeners();
+     }
+ 
+     // Set DDL time to now if not specified
+     if (new_part.getParameters() == null ||
+         new_part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
+         Integer.parseInt(new_part.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
+       new_part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
+           .currentTimeMillis() / 1000));
+     }
++    long txnId = -1;
++    String validWriteIds = null;
++    if (environmentContext != null && environmentContext.isSetProperties()
++        && environmentContext.getProperties().containsKey(StatsSetupConst.VALID_WRITE_IDS)) {
++      txnId = Long.parseLong(environmentContext.getProperties().get(StatsSetupConst.TXN_ID));
++      validWriteIds = environmentContext.getProperties().get(StatsSetupConst.VALID_WRITE_IDS);
++    }
+ 
+     //alter partition
+     if (part_vals == null || part_vals.size() == 0) {
+       try {
+         msdb.openTransaction();
+ 
 -        Table tbl = msdb.getTable(catName, dbname, name);
++        Table tbl = msdb.getTable(catName, dbname, name, -1, null);
+         if (tbl == null) {
+           throw new InvalidObjectException(
+               "Unable to alter partition because table or database does not exist.");
+         }
+         oldPart = msdb.getPartition(catName, dbname, name, new_part.getValues());
+         if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
+           // if stats are same, no need to update
+           if (MetaStoreUtils.isFastStatsSame(oldPart, new_part)) {
+             MetaStoreUtils.updateBasicState(environmentContext, new_part.getParameters());
+           } else {
+             MetaStoreUtils.updatePartitionStatsFast(
+                 new_part, tbl, wh, false, true, environmentContext, false);
+           }
+         }
+ 
+         // PartitionView does not have SD. We do not need update its column stats
+         if (oldPart.getSd() != null) {
+           updateOrGetPartitionColumnStats(msdb, catName, dbname, name, new_part.getValues(),
+               oldPart.getSd().getCols(), tbl, new_part, null);
+         }
 -        msdb.alterPartition(catName, dbname, name, new_part.getValues(), new_part);
++        msdb.alterPartition(
++            catName, dbname, name, new_part.getValues(), new_part, txnId, validWriteIds);
+         if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                 EventMessage.EventType.ALTER_PARTITION,
+                                                 new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
+                                                 environmentContext);
+ 
+ 
+         }
+         success = msdb.commitTransaction();
+       } catch (InvalidObjectException e) {
+         LOG.warn("Alter failed", e);
+         throw new InvalidOperationException("alter is not possible: " + e.getMessage());
+       } catch (NoSuchObjectException e) {
+         //old partition does not exist
+         throw new InvalidOperationException("alter is not possible: " + e.getMessage());
+       } finally {
+         if (!success) {
+           msdb.rollbackTransaction();
+         }
+       }
+       return oldPart;
+     }
+ 
+     //rename partition
+     String oldPartLoc;
+     String newPartLoc;
+     Path srcPath = null;
+     Path destPath = null;
+     FileSystem srcFs;
+     FileSystem destFs = null;
+     boolean dataWasMoved = false;
+     Database db;
+     try {
+       msdb.openTransaction();
 -      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name);
++      Table tbl = msdb.getTable(DEFAULT_CATALOG_NAME, dbname, name, -1, null);
+       if (tbl == null) {
+         throw new InvalidObjectException(
+             "Unable to alter partition because table or database does not exist.");
+       }
+       try {
+         oldPart = msdb.getPartition(catName, dbname, name, part_vals);
+       } catch (NoSuchObjectException e) {
+         // this means there is no existing partition
+         throw new InvalidObjectException(
+             "Unable to rename partition because old partition does not exist");
+       }
+ 
+       Partition check_part;
+       try {
+         check_part = msdb.getPartition(catName, dbname, name, new_part.getValues());
+       } catch(NoSuchObjectException e) {
+         // this means there is no existing partition
+         check_part = null;
+       }
+ 
+       if (check_part != null) {
+         throw new AlreadyExistsException("Partition already exists:" + dbname + "." + name + "." +
+             new_part.getValues());
+       }
+ 
+       // when renaming a partition, we should update
+       // 1) partition SD Location
+       // 2) partition column stats if there are any because of part_name field in HMS table PART_COL_STATS
+       // 3) rename the partition directory if it is not an external table
+       if (!tbl.getTableType().equals(TableType.EXTERNAL_TABLE.toString())) {
+         try {
+           db = msdb.getDatabase(catName, dbname);
+ 
+           // if tbl location is available use it
+           // else derive the tbl location from database location
+           destPath = wh.getPartitionPath(db, tbl, new_part.getValues());
+           destPath = constructRenamedPath(destPath, new Path(new_part.getSd().getLocation()));
+         } catch (NoSuchObjectException e) {
+           LOG.debug("Didn't find object in metastore ", e);
+           throw new InvalidOperationException(
+             "Unable to change partition or table. Database " + dbname + " does not exist"
+               + " Check metastore logs for detailed stack." + e.getMessage());
+         }
+ 
+         if (destPath != null) {
+           newPartLoc = destPath.toString();
+           oldPartLoc = oldPart.getSd().getLocation();
+           LOG.info("srcPath:" + oldPartLoc);
+           LOG.info("descPath:" + newPartLoc);
+           srcPath = new Path(oldPartLoc);
+           srcFs = wh.getFs(srcPath);
+           destFs = wh.getFs(destPath);
+           // check that src and dest are on the same file system
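+           // (a directory rename cannot cross file systems; supporting that would
+           // require copying the data instead)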
+           if (!FileUtils.equalsFileSystem(srcFs, destFs)) {
+             throw new InvalidOperationException("New table location " + destPath
+               + " is on a different file system than the old location "
+               + srcPath + ". This operation is not supported.");
+           }
+ 
+           try {
+             if (srcFs.exists(srcPath)) {
+               if (newPartLoc.compareTo(oldPartLoc) != 0 && destFs.exists(destPath)) {
+                 throw new InvalidOperationException("New location for this table "
+                   + tbl.getDbName() + "." + tbl.getTableName()
+                   + " already exists : " + destPath);
+               }
+               // if destPath's parent path doesn't exist, we should mkdir it
+               Path destParentPath = destPath.getParent();
+               if (!wh.mkdirs(destParentPath)) {
+                   throw new MetaException("Unable to create path " + destParentPath);
+               }
+ 
+               // rename the data directory
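+               // (when the database is a source of replication, renameDir also
+               // preserves the old files via the change manager so that replication
+               // can still read them)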
+               wh.renameDir(srcPath, destPath, ReplChangeManager.isSourceOfReplication(db));
+               LOG.info("Partition directory rename from " + srcPath + " to " + destPath + " done.");
+               dataWasMoved = true;
+             }
+           } catch (IOException e) {
+             LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, e);
+             throw new InvalidOperationException("Unable to access src or dest location for partition "
+                 + tbl.getDbName() + "." + tbl.getTableName() + " " + new_part.getValues());
+           } catch (MetaException me) {
+             LOG.error("Cannot rename partition directory from " + srcPath + " to " + destPath, me);
+             throw me;
+           }
+           new_part.getSd().setLocation(newPartLoc);
+         }
+       } else {
+         new_part.getSd().setLocation(oldPart.getSd().getLocation());
+       }
+ 
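+       // Fast stats (e.g. numFiles, totalSize) may be stale after the move;
+       // updatePartitionStatsFast recomputes them from the file system when required.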
+       if (MetaStoreUtils.requireCalStats(oldPart, new_part, tbl, environmentContext)) {
+         MetaStoreUtils.updatePartitionStatsFast(
+             new_part, tbl, wh, false, true, environmentContext, false);
+       }
+ 
+       String newPartName = Warehouse.makePartName(tbl.getPartitionKeys(), new_part.getValues());
+       ColumnStatistics cs = updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldPart.getValues(),
+           oldPart.getSd().getCols(), tbl, new_part, null);
 -      msdb.alterPartition(catName, dbname, name, part_vals, new_part);
++      msdb.alterPartition(catName, dbname, name, part_vals, new_part, txnId, validWriteIds);
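+       // Re-attach the surviving column stats under the new partition name.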
+       if (cs != null) {
+         cs.getStatsDesc().setPartName(newPartName);
+         try {
+           msdb.updatePartitionColumnStatistics(cs, new_part.getValues());
+         } catch (InvalidInputException iie) {
+           throw new InvalidOperationException("Unable to update partition stats in table rename." + iie);
+         } catch (NoSuchObjectException nsoe) {
+           // It is ok, ignore
+         }
+       }
+ 
+       if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+         MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                               EventMessage.EventType.ALTER_PARTITION,
+                                               new AlterPartitionEvent(oldPart, new_part, tbl, false, true, handler),
+                                               environmentContext);
+       }
+ 
+       success = msdb.commitTransaction();
+     } finally {
+       if (!success) {
+         LOG.error("Failed to rename a partition. Rollback transaction");
+         msdb.rollbackTransaction();
+         if (dataWasMoved) {
+           LOG.error("Revert the data move in renaming a partition.");
+           try {
+             if (destFs.exists(destPath)) {
+               wh.renameDir(destPath, srcPath, false);
+             }
+           } catch (MetaException | IOException e) {
+             LOG.error("Failed to restore partition data from " + destPath + " to " + srcPath
+                 + " in alter partition failure. Manual restore is needed.", e);
+           }
+         }
+       }
+     }
+     return oldPart;
+   }
+ 
++  @Deprecated
+   @Override
+   public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String dbname,
+     final String name, final List<Partition> new_parts,
+     EnvironmentContext environmentContext)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     return alterPartitions(msdb, wh, DEFAULT_CATALOG_NAME, dbname, name, new_parts,
 -        environmentContext, null);
++        environmentContext, -1, null, -1, null);
+   }
+ 
+   @Override
+   public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, final String catName,
+                                          final String dbname, final String name,
+                                          final List<Partition> new_parts,
 -                                         EnvironmentContext environmentContext, IHMSHandler handler)
++                                         EnvironmentContext environmentContext,
++                                         long txnId, String writeIdList, long writeId,
++                                         IHMSHandler handler)
+       throws InvalidOperationException, InvalidObjectException, AlreadyExistsException, MetaException {
+     List<Partition> oldParts = new ArrayList<>();
+     List<List<String>> partValsList = new ArrayList<>();
+     List<TransactionalMetaStoreEventListener> transactionalListeners = null;
+     if (handler != null) {
+       transactionalListeners = handler.getTransactionalListeners();
+     }
+ 
+     boolean success = false;
+     try {
+       msdb.openTransaction();
+ 
 -      Table tbl = msdb.getTable(catName, dbname, name);
++      // Note: should we pass in write ID here? We only update stats on parts so probably not.
++      Table tbl = msdb.getTable(catName, dbname, name, -1, null);
+       if (tbl == null) {
+         throw new InvalidObjectException(
+             "Unable to alter partitions because table or database does not exist.");
+       }
+       for (Partition tmpPart: new_parts) {
+         // Set DDL time to now if not specified
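+         // (DDL_TIME is stored in seconds since the epoch, hence the division by 1000)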
+         if (tmpPart.getParameters() == null ||
+             tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME) == null ||
+             Integer.parseInt(tmpPart.getParameters().get(hive_metastoreConstants.DDL_TIME)) == 0) {
+           tmpPart.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System
+               .currentTimeMillis() / 1000));
+         }
+ 
+         Partition oldTmpPart = msdb.getPartition(catName, dbname, name, tmpPart.getValues());
+         oldParts.add(oldTmpPart);
+         partValsList.add(tmpPart.getValues());
+ 
+         if (MetaStoreUtils.requireCalStats(oldTmpPart, tmpPart, tbl, environmentContext)) {
+           // Check if stats are same, no need to update
+           if (MetaStoreUtils.isFastStatsSame(oldTmpPart, tmpPart)) {
+             MetaStoreUtils.updateBasicState(environmentContext, tmpPart.getParameters());
+           } else {
+             MetaStoreUtils.updatePartitionStatsFast(
+                 tmpPart, tbl, wh, false, true, environmentContext, false);
+           }
+         }
+ 
+         // PartitionView does not have SD and we do not need to update its column stats
+         if (oldTmpPart.getSd() != null) {
+           updateOrGetPartitionColumnStats(msdb, catName, dbname, name, oldTmpPart.getValues(),
+               oldTmpPart.getSd().getCols(), tbl, tmpPart, null);
+         }
+       }
+ 
 -      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts);
++      msdb.alterPartitions(catName, dbname, name, partValsList, new_parts, writeId, txnId, writeIdList);
+       Iterator<Partition> oldPartsIt = oldParts.iterator();
+       for (Partition newPart : new_parts) {
+         Partition oldPart;
+         if (oldPartsIt.hasNext()) {
+           oldPart = oldPartsIt.next();
+         } else {
+           throw new InvalidOperationException("Missing old partition corresponding to new partition " +
+               "when invoking MetaStoreEventListener for alterPartitions event.");
+         }
+ 
+         if (transactionalListeners != null && !transactionalListeners.isEmpty()) {
+           MetaStoreListenerNotifier.notifyEvent(transactionalListeners,
+                                                 EventMessage.EventType.ALTER_PARTITION,
+                                                 new AlterPartitionEvent(oldPart, newPart, tbl, false, true, handler));
+         }
+       }
+ 
+       success = msdb.commitTransaction();
+     } catch (InvalidObjectException | NoSuchObjectException e) {
+       throw new InvalidOperationException("Alter partition operation failed: " + e);
+     } finally {
+       if (!success) {
+         msdb.rollbackTransaction();
+       }
+     }
+ 
+     return oldParts;
+   }
+ 
+   private boolean checkPartialPartKeysEqual(List<FieldSchema> oldPartKeys,
+       List<FieldSchema> newPartKeys) {
+     // return true if both are null, or false if one is null and the other isn't
+     if (newPartKeys == null || oldPartKeys == null) {
+       return oldPartKeys == newPartKeys;
+     }
+     if (oldPartKeys.size() != newPartKeys.size()) {
+       return false;
+     }
+     Iterator<FieldSchema> oldPartKeysIter = oldPartKeys.iterator();
+     Iterator<FieldSchema> newPartKeysIter = newPartKeys.iterator();
+     FieldSchema oldFs;
+     FieldSchema newFs;
+     while (oldPartKeysIter.hasNext()) {
+       oldFs = oldPartKeysIter.next();
+       newFs = newPartKeysIter.next();
+       // Alter table can change the type of partition key now.
+       // So check the column name only.
+       if (!oldFs.getName().equals(newFs.getName())) {
+         return false;
+       }
+     }
+ 
+     return true;
+   }
+ 
+   /**
+    * Uses the scheme and authority of the object's current location and the path constructed
+    * using the object's new name to construct a path for the object's new location.
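+    * For example (illustrative paths only): given a defaultNewPath of
+    * hdfs://nn1/warehouse/t/p=2 and a currentPath of s3a://bucket/data/t/p=1,
+    * the result is s3a://bucket/warehouse/t/p=2.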
+    */
+   private Path constructRenamedPath(Path defaultNewPath, Path currentPath) {
+     URI currentUri = currentPath.toUri();
+ 
+     return new Path(currentUri.getScheme(), currentUri.getAuthority(),
+         defaultNewPath.toUri().getPath());
+   }
+ 
+   @VisibleForTesting
 -  void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable)
++  void alterTableUpdateTableColumnStats(RawStore msdb, Table oldTable, Table newTable,
++      EnvironmentContext ec)
+       throws MetaException, InvalidObjectException {
+     String catName = normalizeIdentifier(oldTable.isSetCatName() ? oldTable.getCatName() :
+         getDefaultCatalog(conf));
+     String dbName = oldTable.getDbName().toLowerCase();
+     String tableName = normalizeIdentifier(oldTable.getTableName());
+     String newDbName = newTable.getDbName().toLowerCase();
+     String newTableName = normalizeIdentifier(newTable.getTableName());
++    long txnId = -1;
++    String validWriteIds = null;
++    if (ec != null && ec.isSetProperties() && ec.getProperties().containsKey(
++        StatsSetupConst.VALID_WRITE_IDS)) {
++      txnId = Long.parseLong(ec.getProperties().get(StatsSetupConst.TXN_ID));
++      validWriteIds = ec.getProperties().get(StatsSetupConst.VALID_WRITE_IDS);
++    }
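++    // validWriteIds, when present, carries the serialized ValidWriteIdList for the
++    // table (typically of the form "db.tbl:<highWatermark>:<minOpenWriteId>:<open>:<aborted>");
++    // a null value means the call is not transactional-stats aware.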
+ 
+     try {
+       List<FieldSchema> oldCols = oldTable.getSd().getCols();
+       List<FieldSchema> newCols = newTable.getSd().getCols();
+       List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
+       ColumnStatistics colStats = null;
+       boolean updateColumnStats = true;
+ 
+       // Nothing to update if everything is the same
+       if (newDbName.equals(dbName) &&
+           newTableName.equals(tableName) &&
+           MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
+         updateColumnStats = false;
+       }
+ 
+       if (updateColumnStats) {
+         List<String> oldColNames = new ArrayList<>(oldCols.size());
+         for (FieldSchema oldCol : oldCols) {
+           oldColNames.add(oldCol.getName());
+         }
+ 
 -        // Collect column stats which need to be rewritten and remove old stats
++        // Collect column stats which need to be rewritten and remove old stats.
+         colStats = msdb.getTableColumnStatistics(catName, dbName, tableName, oldColNames);
+         if (colStats == null) {
+           updateColumnStats = false;
+         } else {
+           List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+           if (statsObjs != null) {
+             List<String> deletedCols = new ArrayList<>();
+             for (ColumnStatisticsObj statsObj : statsObjs) {
+               boolean found = false;
+               for (FieldSchema newCol : newCols) {
+                 if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
+                     && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
+                   found = true;
+                   break;
+                 }
+               }
+ 
+               if (found) {
+                 if (!newDbName.equals(dbName) || !newTableName.equals(tableName)) {
+                   msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
+                   newStatsObjs.add(statsObj);
+                   deletedCols.add(statsObj.getColName());
+                 }
+               } else {
+                 msdb.deleteTableColumnStatistics(catName, dbName, tableName, statsObj.getColName());
+                 deletedCols.add(statsObj.getColName());
+               }
+             }
+             StatsSetupConst.removeColumnStatsState(newTable.getParameters(), deletedCols);
+           }
+         }
+       }
+ 
+       // Change to new table and append stats for the new table
 -      msdb.alterTable(catName, dbName, tableName, newTable);
++      msdb.alterTable(catName, dbName, tableName, newTable, txnId, validWriteIds);
+       if (updateColumnStats && !newStatsObjs.isEmpty()) {
+         ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+         statsDesc.setDbName(newDbName);
+         statsDesc.setTableName(newTableName);
+         colStats.setStatsObj(newStatsObjs);
+         msdb.updateTableColumnStatistics(colStats);
+       }
+     } catch (NoSuchObjectException nsoe) {
+       LOG.debug("Could not find db entry." + nsoe);
+     } catch (InvalidInputException e) {
+       //should not happen since the input were verified before passed in
+       throw new InvalidObjectException("Invalid inputs to update table column stats: " + e);
+     }
+   }
+ 
+   private ColumnStatistics updateOrGetPartitionColumnStats(
+       RawStore msdb, String catName, String dbname, String tblname, List<String> partVals,
+       List<FieldSchema> oldCols, Table table, Partition part, List<FieldSchema> newCols)
+           throws MetaException, InvalidObjectException {
+     ColumnStatistics newPartsColStats = null;
+     try {
+       // if newCols are not specified, use default ones.
+       if (newCols == null) {
+         newCols = part.getSd() == null ? new ArrayList<>() : part.getSd().getCols();
+       }
+       String oldPartName = Warehouse.makePartName(table.getPartitionKeys(), partVals);
+       String newPartName = Warehouse.makePartName(table.getPartitionKeys(), part.getValues());
+       boolean rename = !part.getDbName().equals(dbname) || !part.getTableName().equals(tblname)
+           || !oldPartName.equals(newPartName);
+ 
+       // no column stats update is needed if this alter is neither a rename nor a change to existing columns
+       if (!rename && MetaStoreUtils.columnsIncludedByNameType(oldCols, newCols)) {
+         return newPartsColStats;
+       }
+       List<String> oldColNames = new ArrayList<>(oldCols.size());
+       for (FieldSchema oldCol : oldCols) {
+         oldColNames.add(oldCol.getName());
+       }
+       List<String> oldPartNames = Lists.newArrayList(oldPartName);
++      // Note: doesn't take txn stats into account. This method can only remove stats.
+       List<ColumnStatistics> partsColStats = msdb.getPartitionColumnStatistics(catName, dbname, tblname,
+           oldPartNames, oldColNames);
+       assert (partsColStats.size() <= 1);
+       for (ColumnStatistics partColStats : partsColStats) { // at most one iteration in practice
+         List<ColumnStatisticsObj> newStatsObjs = new ArrayList<>();
+         List<ColumnStatisticsObj> statsObjs = partColStats.getStatsObj();
+         List<String> deletedCols = new ArrayList<>();
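+         // Per stats object: column kept + rename -> delete here and re-add it under
+         // the new partition name (via the returned stats); column kept + no rename ->
+         // leave it alone; column dropped or retyped -> delete it for good.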
+         for (ColumnStatisticsObj statsObj : statsObjs) {
+           boolean found = false;
+           for (FieldSchema newCol : newCols) {
+             if (statsObj.getColName().equalsIgnoreCase(newCol.getName())
+                 && statsObj.getColType().equalsIgnoreCase(newCol.getType())) {
+               found = true;
+               break;
+             }
+           }
+           if (found) {
+             if (rename) {
+               msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(),
+                   partVals, statsObj.getColName());
+               newStatsObjs.add(statsObj);
+             }
+           } else {
+             msdb.deletePartitionColumnStatistics(catName, dbname, tblname, partColStats.getStatsDesc().getPartName(),
+                 partVals, statsObj.getColName());
+             deletedCols.add(statsObj.getColName());
+           }
+         }
+         StatsSetupConst.removeColumnStatsState(part.getParameters(), deletedCols);
+         if (!newStatsObjs.isEmpty()) {
+           partColStats.setStatsObj(newStatsObjs);
+           newPartsColStats = partColStats;
+         }
+       }
+     } catch (NoSuchObjectException nsoe) {
+       // ignore this exception; getPartitionColumnStatistics should not actually throw it
+     } catch (InvalidInputException iie) {
+       throw new InvalidObjectException("Invalid input to delete partition column stats." + iie);
+     }
+ 
+     return newPartsColStats;
+   }
+ 
+   private void checkColTypeChangeCompatible(List<FieldSchema> oldCols, List<FieldSchema> newCols)
+       throws InvalidOperationException {
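+     // Compatibility is delegated to ColumnType.areColTypesCompatible; e.g. widening
+     // numeric conversions are typically allowed while lossy ones are rejected
+     // (subject to metastore configuration).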
+     List<String> incompatibleCols = new ArrayList<>();
+     int maxCols = Math.min(oldCols.size(), newCols.size());
+     for (int i = 0; i < maxCols; i++) {
+       if (!ColumnType.areColTypesCompatible(
+           ColumnType.getTypeName(oldCols.get(i).getType()),
+           ColumnType.getTypeName(newCols.get(i).getType()))) {
+         incompatibleCols.add(newCols.get(i).getName());
+       }
+     }
+     if (!incompatibleCols.isEmpty()) {
+       throw new InvalidOperationException(
+           "The following columns have types incompatible with the existing " +
+               "columns in their respective positions :\n" +
+               org.apache.commons.lang.StringUtils.join(incompatibleCols, ',')
+       );
+     }
+   }
+ 
+ }